Line Hotness Optimization Source Inline Context
1
/*
2
 * Copyright (c) 2008-2016 Stefan Krah. All rights reserved.
3
 *
4
 * Redistribution and use in source and binary forms, with or without
5
 * modification, are permitted provided that the following conditions
6
 * are met:
7
 *
8
 * 1. Redistributions of source code must retain the above copyright
9
 *    notice, this list of conditions and the following disclaimer.
10
 *
11
 * 2. Redistributions in binary form must reproduce the above copyright
12
 *    notice, this list of conditions and the following disclaimer in the
13
 *    documentation and/or other materials provided with the distribution.
14
 *
15
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
16
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25
 * SUCH DAMAGE.
26
 */
27
28
29
#include "mpdecimal.h"
30
#include <stdio.h>
31
#include <stdlib.h>
32
#include <string.h>
33
#include <limits.h>
34
#include <math.h>
35
#include "basearith.h"
36
#include "bits.h"
37
#include "convolute.h"
38
#include "crt.h"
39
#include "mpalloc.h"
40
#include "typearith.h"
41
#include "umodarith.h"
42
43
#ifdef PPRO
44
  #if defined(_MSC_VER)
45
    #include <float.h>
46
    #pragma float_control(precise, on)
47
    #pragma fenv_access(on)
48
  #elif !defined(__OpenBSD__) && !defined(__NetBSD__)
49
    /* C99 */
50
    #include <fenv.h>
51
    #pragma STDC FENV_ACCESS ON
52
  #endif
53
#endif
54
55
56
#if defined(_MSC_VER)
57
  #define ALWAYS_INLINE __forceinline
58
#elif defined(LEGACY_COMPILER)
59
  #define ALWAYS_INLINE
60
  #undef inline
61
  #define inline
62
#else
63
  #ifdef TEST_COVERAGE
64
    #define ALWAYS_INLINE
65
  #else
66
    #define ALWAYS_INLINE inline __attribute__ ((always_inline))
67
  #endif
68
#endif
69
70
71
#define MPD_NEWTONDIV_CUTOFF 1024L
72
73
#define MPD_NEW_STATIC(name, flags, exp, digits, len) \
74
        mpd_uint_t name##_data[MPD_MINALLOC_MAX];                    \
75
        mpd_t name = {flags|MPD_STATIC|MPD_STATIC_DATA, exp, digits, \
76
                      len, MPD_MINALLOC_MAX, name##_data}
77
78
#define MPD_NEW_CONST(name, flags, exp, digits, len, alloc, initval) \
79
        mpd_uint_t name##_data[alloc] = {initval};                   \
80
        mpd_t name = {flags|MPD_STATIC|MPD_CONST_DATA, exp, digits,  \
81
                      len, alloc, name##_data}
82
83
#define MPD_NEW_SHARED(name, a) \
84
        mpd_t name = {(a->flags&~MPD_DATAFLAGS)|MPD_STATIC|MPD_SHARED_DATA, \
85
                      a->exp, a->digits, a->len, a->alloc, a->data}
86
87
88
static mpd_uint_t data_one[1] = {1};
89
static mpd_uint_t data_zero[1] = {0};
90
static const mpd_t one = {MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1, data_one};
91
static const mpd_t minus_one = {MPD_NEG|MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1,
92
                                data_one};
93
static const mpd_t zero = {MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1, data_zero};
94
95
static inline void _mpd_check_exp(mpd_t *dec, const mpd_context_t *ctx,
96
                                  uint32_t *status);
97
static void _settriple(mpd_t *result, uint8_t sign, mpd_uint_t a,
98
                       mpd_ssize_t exp);
99
static inline mpd_ssize_t _mpd_real_size(mpd_uint_t *data, mpd_ssize_t size);
100
101
static int _mpd_cmp_abs(const mpd_t *a, const mpd_t *b);
102
103
static void _mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
104
                      const mpd_context_t *ctx, uint32_t *status);
105
static inline void _mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
106
                             const mpd_context_t *ctx, uint32_t *status);
107
static void _mpd_base_ndivmod(mpd_t *q, mpd_t *r, const mpd_t *a,
108
                              const mpd_t *b, uint32_t *status);
109
static inline void _mpd_qpow_uint(mpd_t *result, const mpd_t *base,
110
                                  mpd_uint_t exp, uint8_t resultsign,
111
                                  const mpd_context_t *ctx, uint32_t *status);
112
113
static mpd_uint_t mpd_qsshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n);
114
115
116
/******************************************************************************/
117
/*                                  Version                                   */
118
/******************************************************************************/
119
120
const char *
121
mpd_version(void)
122
{
123
    return MPD_VERSION;
124
}
125
126
127
/******************************************************************************/
128
/*                  Performance critical inline functions                     */
129
/******************************************************************************/
130
131
#ifdef CONFIG_64
132
/* Digits in a word, primarily useful for the most significant word. */
133
ALWAYS_INLINE int
134
mpd_word_digits(mpd_uint_t word)
135
{
136
    if (word < mpd_pow10[9]) {
gvn
               
load of type i64 eliminated in favor of load 
mpd_qand
gvn
               
load of type i64 eliminated in favor of load 
mpd_qor
gvn
               
load of type i64 eliminated in favor of load 
mpd_qxor
gvn
               
load of type i64 eliminated in favor of load 
_mpd_qreciprocal_approx
gvn
               
load of type i64 eliminated in favor of load 
_mpd_qdivmod
licm
               
hosting load 
_mpd_qexp
gvn
               
load of type i64 eliminated in favor of load 
_mpd_qexp
gvn
               
load of type i64 eliminated in favor of load 
_mpd_qln
gvn
               
load of type i64 eliminated in favor of load 
mpd_qlog10
gvn
               
load of type i64 eliminated in favor of load 
_qcheck_pow_bounds
gvn
               
load of type i64 eliminated in favor of phi 
mpd_qpow
137
        if (word < mpd_pow10[4]) {
licm
                   
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
138
            if (word < mpd_pow10[2]) {
licm
                       
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
139
                return (word < mpd_pow10[1]) ? 1 : 2;
licm
                               
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
140
            }
141
            return (word < mpd_pow10[3]) ? 3 : 4;
licm
                           
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
142
        }
143
        if (word < mpd_pow10[6]) {
licm
                   
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
144
            return (word < mpd_pow10[5]) ? 5 : 6;
licm
                           
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
145
        }
146
        if (word < mpd_pow10[8]) {
licm
                   
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
147
            return (word < mpd_pow10[7]) ? 7 : 8;
licm
                           
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
148
        }
149
        return 9;
150
    }
151
    if (word < mpd_pow10[14]) {
licm
               
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
loop-vectorize
        
loop not vectorized: control flow cannot be substituted for a select 
_mpd_qexp
152
        if (word < mpd_pow10[11]) {
licm
                   
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
153
            return (word < mpd_pow10[10]) ? 10 : 11;
licm
                           
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
154
        }
155
        if (word < mpd_pow10[13]) {
licm
                   
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
156
            return (word < mpd_pow10[12]) ? 12 : 13;
licm
                           
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
157
        }
158
        return 14;
159
    }
160
    if (word < mpd_pow10[18]) {
licm
               
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
161
        if (word < mpd_pow10[16]) {
licm
                   
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
162
            return (word < mpd_pow10[15]) ? 15 : 16;
licm
                           
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
163
        }
164
        return (word < mpd_pow10[17]) ? 17 : 18;
gvn
                       
load of type i64 eliminated in favor of load 
_mpd_qreciprocal_approx
licm
                       
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
165
    }
166
167
    return (word < mpd_pow10[19]) ? 19 : 20;
licm
                   
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qexp
168
}
169
#else
170
ALWAYS_INLINE int
171
mpd_word_digits(mpd_uint_t word)
172
{
173
    if (word < mpd_pow10[4]) {
174
        if (word < mpd_pow10[2]) {
175
            return (word < mpd_pow10[1]) ? 1 : 2;
176
        }
177
        return (word < mpd_pow10[3]) ? 3 : 4;
178
    }
179
    if (word < mpd_pow10[6]) {
180
        return (word < mpd_pow10[5]) ? 5 : 6;
181
    }
182
    if (word < mpd_pow10[8]) {
183
        return (word < mpd_pow10[7]) ? 7 : 8;
184
    }
185
186
    return (word < mpd_pow10[9]) ? 9 : 10;
187
}
188
#endif
189
190
191
/* Adjusted exponent */
192
ALWAYS_INLINE mpd_ssize_t
193
mpd_adjexp(const mpd_t *dec)
194
{
195
    return (dec->exp + dec->digits) - 1;
gvn
                 
load of type i64 eliminated in favor of sub 
_mpd_qexp
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
_mpd_qexp
gvn
                 
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qexp
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qexp
gvn
                 
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qln
gvn
                 
load eliminated by PRE 
_mpd_qln
gvn
                            
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qln
gvn
                            
load of type i64 eliminated in favor of load 
_mpd_qln
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
mpd_qln
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
mpd_qln
gvn
                 
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qlog10
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qlog10
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                 
load of type i64 not eliminated in favor of store because it is clobbered by call 
_lower_bound_zeta
gvn
                            
load of type i64 not eliminated in favor of store because it is clobbered by call 
_lower_bound_zeta
gvn
                 
load of type i64 eliminated in favor of load 
_qcheck_pow_bounds
gvn
                            
load of type i64 eliminated in favor of load 
_qcheck_pow_bounds
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                 
load eliminated by PRE 
mpd_qpow
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
_mpd_qrescale
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
_mpd_qrescale
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                 
load eliminated by PRE 
mpd_qquantize
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem_near
196
}
197
198
/* Etiny */
199
ALWAYS_INLINE mpd_ssize_t
200
mpd_etiny(const mpd_context_t *ctx)
201
{
202
    return ctx->emin - (ctx->prec - 1);
gvn
                
load of type i64 not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                             
load of type i64 not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                
load of type i64 not eliminated because it is clobbered by call 
_mpd_qln
gvn
                             
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qln
gvn
                
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                             
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                
load of type i64 eliminated in favor of load 
_qcheck_pow_bounds
gvn
                             
load of type i64 eliminated in favor of load 
_qcheck_pow_bounds
gvn
                
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                             
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qquantize
gvn
                             
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qquantize
gvn
                
load of type i64 not eliminated because it is clobbered by call 
mpd_qinvroot
gvn
                             
load of type i64 not eliminated because it is clobbered by call 
mpd_qinvroot
203
}
204
205
/* Etop: used for folding down in IEEE clamping */
206
ALWAYS_INLINE mpd_ssize_t
207
mpd_etop(const mpd_context_t *ctx)
208
{
209
    return ctx->emax - (ctx->prec - 1);
gvn
                
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                             
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qnext_minus
gvn
                
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                             
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qnext_plus
gvn
                             
load of type i64 not eliminated because it is clobbered by call 
mpd_qreduce
210
}
211
212
/* Most significant word */
213
ALWAYS_INLINE mpd_uint_t
214
mpd_msword(const mpd_t *dec)
215
{
216
    assert(dec->len > 0);
217
    return dec->data[dec->len-1];
gvn
                
load of type i64* eliminated in favor of load 
_ssettriple
gvn
           
load of type i64 not eliminated because it is clobbered by store 
_ssettriple
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_fix_nan
gvn
           
load of type i64 not eliminated because it is clobbered by store 
_mpd_fix_nan
gvn
                
load of type i64* eliminated in favor of load 
_mpd_fix_nan
gvn
                          
load of type i64 eliminated in favor of phi 
_mpd_fix_nan
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round_excess
gvn
                
load eliminated by PRE 
_mpd_apply_round_excess
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round_excess
gvn
                          
load eliminated by PRE 
_mpd_apply_round_excess
gvn
                
load of type i64* not eliminated because it is clobbered by call 
_mpd_check_exp
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_check_exp
gvn
           
load of type i64 not eliminated because it is clobbered by store 
_mpd_check_exp
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_check_round
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_check_round
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qfinalize
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qfinalize
gvn
                
load of type i64* eliminated in favor of load 
mpd_qset_uint
gvn
           
load of type i64 not eliminated because it is clobbered by store 
mpd_qset_uint
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qget_uint
gvn
                          
load of type i64 eliminated in favor of load 
_mpd_qget_uint
gvn
           
load of type i64 eliminated in favor of load 
_mpd_qget_uint
gvn
           
load of type i64 not eliminated in favor of load because it is clobbered by store 
_mpd_qget_uint
gvn
                
load of type i64* eliminated in favor of load 
mpd_qcompare
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_qcompare
gvn
                
load of type i64* eliminated in favor of load 
mpd_qcompare_signal
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_qcompare_signal
gvn
                
load of type i64* eliminated in favor of load 
mpd_compare_total
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_compare_total
gvn
                
load of type i64* eliminated in favor of load 
mpd_compare_total_mag
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_compare_total_mag
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_cap
gvn
           
load of type i64 not eliminated because it is clobbered by store 
_mpd_cap
gvn
                          
load of type i64 eliminated in favor of phi 
_mpd_cap
gvn
                
load of type i64* eliminated in favor of phi 
_mpd_cap
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qand
gvn
                          
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qand
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qinvert
gvn
                          
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qinvert
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qor
gvn
                          
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qor
gvn
                          
load of type i64 not eliminated because it is clobbered by store 
_mpd_qaddsub
gvn
           
load of type i64 not eliminated because it is clobbered by store 
_mpd_qaddsub
gvn
                
load of type i64* not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                          
load of type i64 eliminated in favor of phi 
_mpd_qaddsub
gvn
                
load eliminated by PRE 
_mpd_qaddsub
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qxor
gvn
                          
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qxor
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qminus
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qminus
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qplus
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qplus
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qdiv_inf
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
_mpd_qdiv_inf
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qreciprocal_approx
gvn
           
load of type i64 not eliminated because it is clobbered by store 
_mpd_qreciprocal_approx
gvn
                
load of type i64* not eliminated because it is clobbered by call 
_mpd_qmul
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qmul
gvn
           
load of type i64 not eliminated because it is clobbered by store 
_mpd_qreciprocal
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qround_to_integral
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
_mpd_qround_to_integral
gvn
           
load of type i64 not eliminated because it is clobbered by store 
_mpd_base_ndivmod
gvn
           
load of type i64 not eliminated because it is clobbered by store 
_mpd_qdiv
gvn
                
load of type i64* not eliminated in favor of store because it is clobbered by call 
_mpd_qdiv
gvn
                          
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qdiv
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qdiv
gvn
           
load of type i64 not eliminated because it is clobbered by store 
_mpd_qdivmod
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_qdivmod
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qdivmod
gvn
                
load of type i64* eliminated in favor of load 
mpd_qdivmod
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_qdivmod
gvn
                
load of type i64* eliminated in favor of load 
mpd_qdivint
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_qdivint
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qexp_check_one
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
_mpd_qexp_check_one
licm
                
hoisting getelementptr 
_mpd_qpow_uint
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_uint
licm
                          
hoisting getelementptr 
_mpd_qpow_uint
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_uint
gvn
                
load of type i64* not eliminated because it is clobbered by call 
_mpd_qpow_uint
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_uint
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qpow_uint
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
_mpd_qpow_uint
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qexp
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qexp
gvn
           
load of type i64 not eliminated because it is clobbered by store 
_mpd_qexp
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qexp
gvn
                          
load of type i64 not eliminated in favor of store because it is clobbered by store 
_mpd_qexp
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
licm
                          
hoisting getelementptr 
mpd_qexp
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
gvn
                
load of type i64* eliminated in favor of load 
mpd_qexp
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_qexp
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qexp
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qexp
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qln
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
_mpd_qln
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qln
licm
                          
hoisting getelementptr 
mpd_qln
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qln
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qln
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qln
gvn
                
load of type i64* eliminated in favor of load 
mpd_qln
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_qln
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qlog10
licm
                          
hoisting getelementptr 
mpd_qlog10
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qlog10
gvn
                
load of type i64* eliminated in favor of load 
mpd_qlog10
gvn
           
load of type i64 not eliminated because it is clobbered by store 
mpd_qlog10
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                
load of type i64* not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                
load of type i64* eliminated in favor of load 
_qcheck_pow_one
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
_qcheck_pow_one
gvn
                
load of type i64* eliminated in favor of load 
_qcheck_pow_bounds
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
_qcheck_pow_bounds
licm
                
hoisting getelementptr 
_mpd_qpow_mpd
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_mpd
licm
                          
hoisting getelementptr 
_mpd_qpow_mpd
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_mpd
gvn
                
load of type i64* not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
                
load eliminated by PRE 
_mpd_qpow_mpd
gvn
                          
load eliminated by PRE 
_mpd_qpow_mpd
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_int
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_int
gvn
                
load of type i64* not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qpow_int
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
_mpd_qpow_int
gvn
           
load eliminated by PRE 
_mpd_qpow_int
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpow
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpow
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qpow
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                
load of type i64* eliminated in favor of load 
mpd_qpow
gvn
                          
load of type i64 eliminated in favor of load 
mpd_qpow
gvn
           
load of type i64 eliminated in favor of load 
mpd_qpow
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_qpow
gvn
                
load of type i64* not eliminated because it is clobbered by call 
_mpd_qrescale
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qrescale
gvn
                
load of type i64* eliminated in favor of load 
_mpd_qrescale
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
_mpd_qrescale
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpowmod
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpowmod
gvn
                
load of type i64* eliminated in favor of load 
mpd_qpowmod
gvn
                          
load of type i64 eliminated in favor of load 
mpd_qpowmod
gvn
           
load of type i64 eliminated in favor of load 
mpd_qpowmod
gvn
                
load of type i64* not eliminated in favor of store because it is clobbered by call 
mpd_qpowmod
gvn
                          
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qpowmod
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_qpowmod
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round_fit
gvn
                
load eliminated by PRE 
_mpd_apply_round_fit
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round_fit
gvn
                          
load eliminated by PRE 
_mpd_apply_round_fit
gvn
                
load of type i64* eliminated in favor of load 
mpd_qquantize
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_qquantize
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                
load of type i64* eliminated in favor of phi 
mpd_qreduce
gvn
           
load of type i64 not eliminated because it is clobbered by store 
mpd_qreduce
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                
load of type i64* eliminated in favor of load 
mpd_qrem_near
gvn
                          
load of type i64 eliminated in favor of load 
mpd_qrem_near
gvn
           
load of type i64 eliminated in favor of load 
mpd_qrem_near
gvn
                
load of type i64* eliminated in favor of load 
_invroot_init_approx
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
_invroot_init_approx
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
_mpd_qinvroot
gvn
                
load of type i64* eliminated in favor of load 
mpd_qinvroot
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_qinvroot
gvn
                
load of type i64* eliminated in favor of load 
mpd_qsqrt
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by store 
mpd_qsqrt
gvn
                
load of type i64* eliminated in favor of load 
mpd_qexport_u16
gvn
                          
load of type i64 eliminated in favor of load 
mpd_qexport_u16
gvn
           
load of type i64 eliminated in favor of load 
mpd_qexport_u16
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by store 
mpd_qexport_u16
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qexport_u16
gvn
                
load of type i64* eliminated in favor of load 
mpd_qexport_u32
gvn
                          
load of type i64 eliminated in favor of load 
mpd_qexport_u32
gvn
           
load of type i64 eliminated in favor of load 
mpd_qexport_u32
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by store 
mpd_qexport_u32
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qexport_u32
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qimport_u16
gvn
           
load of type i64 not eliminated because it is clobbered by store 
mpd_qimport_u16
gvn
                
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qimport_u32
gvn
           
load of type i64 not eliminated because it is clobbered by store 
mpd_qimport_u32
218
}
219
220
/* Most significant digit of a word */
221
inline mpd_uint_t
222
mpd_msd(mpd_uint_t word)
223
{
224
    int n;
225
226
    n = mpd_word_digits(word);
inline
        
mpd_word_digits should always be inlined (cost=always) 
mpd_msd
inline
        
mpd_word_digits inlined into mpd_msd 
mpd_msd
227
    return word / mpd_pow10[n-1];
228
}
229
230
/* Least significant digit of a word */
231
ALWAYS_INLINE mpd_uint_t
232
mpd_lsd(mpd_uint_t word)
233
{
234
    return word % 10;
235
}
236
237
/* Coefficient size needed to store 'digits' */
238
ALWAYS_INLINE mpd_ssize_t
239
mpd_digits_to_size(mpd_ssize_t digits)
240
{
241
    mpd_ssize_t q, r;
242
243
    _mpd_idiv_word(&q, &r, digits, MPD_RDIGITS);
inline
    
_mpd_idiv_word can be inlined into mpd_digits_to_size with cost=-30 (threshold=487) 
mpd_digits_to_size
inline
    
_mpd_idiv_word inlined into mpd_digits_to_size 
mpd_digits_to_size
244
    return (r == 0) ? q : q+1;
245
}
246
247
/* Number of digits in the exponent. Not defined for MPD_SSIZE_MIN. */
248
inline int
249
mpd_exp_digits(mpd_ssize_t exp)
250
{
251
    exp = (exp < 0) ? -exp : exp;
252
    return mpd_word_digits(exp);
inline
           
mpd_word_digits should always be inlined (cost=always) 
mpd_exp_digits
inline
           
mpd_word_digits inlined into mpd_exp_digits 
mpd_exp_digits
253
}
254
255
/* Canonical */
256
ALWAYS_INLINE int
257
mpd_iscanonical(const mpd_t *dec UNUSED)
258
{
259
    return 1;
260
}
261
262
/* Finite */
263
ALWAYS_INLINE int
264
mpd_isfinite(const mpd_t *dec)
265
{
266
    return !(dec->flags & MPD_SPECIAL);
licm
                  
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpowmod
gvn
                  
load of type i8 not eliminated because it is clobbered by store 
mpd_qpowmod
267
}
268
269
/* Infinite */
270
ALWAYS_INLINE int
271
mpd_isinfinite(const mpd_t *dec)
272
{
273
    return dec->flags & MPD_INF;
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qshift
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_qmul
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qdivmod
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qdivint
gvn
                
load of type i8 eliminated in favor of load 
mpd_qdivint
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                
load of type i8 eliminated in favor of phi 
mpd_qpow
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qrem
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qquantize
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qrem_near
gvn
                
load of type i8 eliminated in favor of load 
mpd_same_quantum
274
}
275
276
/* NaN */
277
ALWAYS_INLINE int
278
mpd_isnan(const mpd_t *dec)
279
{
280
    return dec->flags & (MPD_NAN|MPD_SNAN);
gvn
                
load of type i8 eliminated in favor of load 
mpd_qcmp
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
gvn
                
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qexp
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qln
gvn
                
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qln
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qlog10
gvn
                
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qlog10
gvn
                
load of type i8 eliminated in favor of load 
mpd_qmax
gvn
                
load of type i8 eliminated in favor of load 
mpd_qmax_mag
gvn
                
load of type i8 eliminated in favor of load 
mpd_qmin
gvn
                
load of type i8 eliminated in favor of load 
mpd_qmin_mag
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_plus
licm
                
hosting getelementptr 
_mpd_qpow_mpd
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_mpd
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_int
gvn
                
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_int
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpow
gvn
                
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qpow
gvn
                
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qrem_near
gvn
                
load of type i8 eliminated in favor of load 
mpd_same_quantum
281
}
282
283
/* Negative */
284
ALWAYS_INLINE int
285
mpd_isnegative(const mpd_t *dec)
286
{
287
    return dec->flags & MPD_NEG;
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_check_round
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qfinalize
gvn
                
load of type i8 eliminated in favor of load 
_mpd_qget_uint
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qget_ssize
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qget_i64
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qget_i32
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_cmp
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_cmp_total
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qrotate
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qscaleb
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qshift
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qabs
gvn
                
load eliminated by PRE 
mpd_qabs
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qexp
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qln
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qlog10
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qnext_minus
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qpow
gvn
                
load of type i8 eliminated in favor of load 
mpd_qpow
gvn
                
load of type i8 eliminated in favor of load 
mpd_qpowmod
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qinvroot
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qsqrt
288
}
289
290
/* Positive */
291
ALWAYS_INLINE int
292
mpd_ispositive(const mpd_t *dec)
293
{
294
    return !(dec->flags & MPD_NEG);
gvn
                  
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_check_round
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qfinalize
gvn
                  
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qnext_plus
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qquantize
295
}
296
297
/* qNaN */
298
ALWAYS_INLINE int
299
mpd_isqnan(const mpd_t *dec)
300
{
301
    return dec->flags & MPD_NAN;
gvn
                
load eliminated by PRE 
mpd_qmax
gvn
                
load eliminated by PRE 
mpd_qmax_mag
gvn
                
load eliminated by PRE 
mpd_qmin
gvn
                
load eliminated by PRE 
mpd_qmin_mag
302
}
303
304
/* Signed */
305
ALWAYS_INLINE int
306
mpd_issigned(const mpd_t *dec)
307
{
308
    return dec->flags & MPD_NEG;
309
}
310
311
/* sNaN */
312
ALWAYS_INLINE int
313
mpd_issnan(const mpd_t *dec)
314
{
315
    return dec->flags & MPD_SNAN;
316
}
317
318
/* Special */
319
ALWAYS_INLINE int
320
mpd_isspecial(const mpd_t *dec)
321
{
322
    return dec->flags & MPD_SPECIAL;
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qfinalize
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_cap
gvn
                
load of type i8 eliminated in favor of load 
mpd_class
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qminus
gvn
                
load eliminated by PRE 
mpd_qminus
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qplus
gvn
                
load eliminated by PRE 
mpd_qplus
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qmul
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qreciprocal
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qdivmod
licm
                
hosting getelementptr 
_mpd_qpow_uint
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_uint
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_uint
licm
                
hosting getelementptr 
mpd_qexp
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qexp
gvn
                
load eliminated by PRE 
mpd_qexp
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qln10
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qln10
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qln
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qln
licm
                
hosting getelementptr 
mpd_qln
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qln
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qln
licm
                
hosting getelementptr 
mpd_qlog10
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qlog10
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                
load of type i8 eliminated in favor of load 
mpd_qnext_toward
gvn
                
load of type i8 not eliminated in favor of store because it is clobbered by call 
_lower_bound_zeta
gvn
                
load of type i8 not eliminated in favor of store because it is clobbered by call 
_qcheck_pow_bounds
licm
                
hosting getelementptr 
_mpd_qpow_mpd
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_mpd
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
                
load eliminated by PRE 
_mpd_qpow_mpd
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_int
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_int
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpow
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                
load eliminated by PRE 
mpd_qpow
gvn
                
load of type i8 eliminated in favor of phi 
mpd_qpow
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qrescale
gvn
                
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qpowmod
gvn
                
load eliminated by PRE 
mpd_qpowmod
gvn
                
load of type i8 eliminated in favor of phi 
mpd_qpowmod
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qreduce
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qsqrt
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qsqrt
gvn
                
load eliminated by PRE 
mpd_qsqrt
gvn
                
load of type i8 eliminated in favor of load 
mpd_qexport_u16
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qexport_u16
gvn
                
load eliminated by PRE 
mpd_qexport_u16
gvn
                
load of type i8 eliminated in favor of load 
mpd_qexport_u32
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qexport_u32
gvn
                
load eliminated by PRE 
mpd_qexport_u32
323
}
324
325
/* Zero */
326
ALWAYS_INLINE int
327
mpd_iszero(const mpd_t *dec)
328
{
329
    return !mpd_isspecial(dec) && mpd_msword(dec) == 0;
inline
            
mpd_isspecial should always be inlined (cost=always) 
mpd_iszero
inline
            
mpd_isspecial inlined into mpd_iszero 
mpd_iszero
inline
                                  
mpd_msword should always be inlined (cost=always) 
mpd_iszero
inline
                                  
mpd_msword inlined into mpd_iszero 
mpd_iszero
330
}
331
332
/* Test for zero when specials have been ruled out already */
333
ALWAYS_INLINE int
334
mpd_iszerocoeff(const mpd_t *dec)
335
{
336
    return mpd_msword(dec) == 0;
inline
           
mpd_msword should always be inlined (cost=always) 
mpd_iszerocoeff
inline
           
mpd_msword inlined into mpd_iszerocoeff 
mpd_iszerocoeff
337
}
338
339
/* Normal */
340
inline int
341
mpd_isnormal(const mpd_t *dec, const mpd_context_t *ctx)
342
{
343
    if (mpd_isspecial(dec)) return 0;
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_isnormal
inline
        
mpd_isspecial inlined into mpd_isnormal 
mpd_isnormal
344
    if (mpd_iszerocoeff(dec)) return 0;
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_isnormal
inline
        
mpd_iszerocoeff inlined into mpd_isnormal 
mpd_isnormal
345
346
    return mpd_adjexp(dec) >= ctx->emin;
inline
           
mpd_adjexp should always be inlined (cost=always) 
mpd_isnormal
inline
           
mpd_adjexp inlined into mpd_isnormal 
mpd_isnormal
347
}
348
349
/* Subnormal */
350
inline int
351
mpd_issubnormal(const mpd_t *dec, const mpd_context_t *ctx)
352
{
353
    if (mpd_isspecial(dec)) return 0;
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_issubnormal
inline
        
mpd_isspecial inlined into mpd_issubnormal 
mpd_issubnormal
354
    if (mpd_iszerocoeff(dec)) return 0;
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_issubnormal
inline
        
mpd_iszerocoeff inlined into mpd_issubnormal 
mpd_issubnormal
355
356
    return mpd_adjexp(dec) < ctx->emin;
inline
           
mpd_adjexp should always be inlined (cost=always) 
mpd_issubnormal
inline
           
mpd_adjexp inlined into mpd_issubnormal 
mpd_issubnormal
gvn
                                  
load of type i64 not eliminated because it is clobbered by call 
_mpd_qrescale
357
}
358
359
/* Odd word */
360
ALWAYS_INLINE int
361
mpd_isoddword(mpd_uint_t word)
362
{
363
    return word & 1;
364
}
365
366
/* Odd coefficient */
367
ALWAYS_INLINE int
368
mpd_isoddcoeff(const mpd_t *dec)
369
{
370
    return mpd_isoddword(dec->data[0]);
inline
           
mpd_isoddword should always be inlined (cost=always) 
mpd_isoddcoeff
inline
           
mpd_isoddword inlined into mpd_isoddcoeff 
mpd_isoddcoeff
gvn
                              
load of type i64* not eliminated because it is clobbered by call 
_mpd_check_round
gvn
                              
load of type i64* not eliminated because it is clobbered by call 
mpd_qfinalize
gvn
                              
load of type i64* not eliminated because it is clobbered by call 
mpd_qquantize
371
}
372
373
/* 0 if dec is positive, 1 if dec is negative */
374
ALWAYS_INLINE uint8_t
375
mpd_sign(const mpd_t *dec)
376
{
377
    return dec->flags & MPD_NEG;
gvn
                
load of type i8 eliminated in favor of load 
_mpd_cap
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qrotate
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qadd
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qsub
gvn
                
load of type i8 eliminated in favor of load 
_mpd_qmul_inf
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qmul
gvn
                
load of type i8 eliminated in favor of load 
_mpd_qreciprocal
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qmax
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qmax_mag
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qmin
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qmin_mag
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                
load of type i8 eliminated in favor of load 
_mpd_qrescale
gvn
                
load of type i8 eliminated in favor of load 
mpd_qquantize
gvn
                
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qrem_near
378
}
379
380
/* 1 if dec is positive, -1 if dec is negative */
381
ALWAYS_INLINE int
382
mpd_arith_sign(const mpd_t *dec)
383
{
384
    return 1 - 2 * mpd_isnegative(dec);
inline
                   
mpd_isnegative should always be inlined (cost=always) 
mpd_arith_sign
inline
                   
mpd_isnegative inlined into mpd_arith_sign 
mpd_arith_sign
385
}
386
387
/* Radix */
388
ALWAYS_INLINE long
389
mpd_radix(void)
390
{
391
    return 10;
392
}
393
394
/* Dynamic decimal */
395
ALWAYS_INLINE int
396
mpd_isdynamic(const mpd_t *dec)
397
{
398
    return !(dec->flags & MPD_STATIC);
gvn
                  
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_del
gvn
                  
load eliminated by PRE 
mpd_del
399
}
400
401
/* Static decimal */
402
ALWAYS_INLINE int
403
mpd_isstatic(const mpd_t *dec)
404
{
405
    return dec->flags & MPD_STATIC;
406
}
407
408
/* Data of decimal is dynamic */
409
ALWAYS_INLINE int
410
mpd_isdynamic_data(const mpd_t *dec)
411
{
412
    return !(dec->flags & MPD_DATAFLAGS);
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qaddsub
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qrotate
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qadd_ssize
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qadd_uint
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qsub_ssize
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qsub_uint
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qadd_u32
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qadd_u64
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qsub_u32
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qsub_u64
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qmul
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qreciprocal
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_base_ndivmod
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qdiv
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qdivmod
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qdivint
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qdiv_ssize
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qdiv_uint
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qdiv_u32
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qdiv_u64
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qfma
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qexp
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qexp
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qln10
gvn
                  
load eliminated by PRE 
mpd_qln10
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qmul_ssize
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qln
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qln
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qlog10
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qlog10
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qmul_uint
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_int
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_real
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qpow
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qrem
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qpowmod
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qrem_near
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by store 
_mpd_qinvroot
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qinvroot
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qsqrt
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qexport_u16
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qexport_u32
413
}
414
415
/* Data of decimal is static */
416
ALWAYS_INLINE int
417
mpd_isstatic_data(const mpd_t *dec)
418
{
419
    return dec->flags & MPD_STATIC_DATA;
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qshiftr_inplace
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_apply_round_excess
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_check_exp
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qsshiftr
gvn
                
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qget_uint
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qcompare
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qcompare_signal
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_compare_total
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_compare_total_mag
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                
load of type i8 eliminated in favor of and 
mpd_qand
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qand
gvn
                
load of type i8 eliminated in favor of and 
mpd_qinvert
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qinvert
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qlogb
gvn
                
load of type i8 eliminated in favor of and 
mpd_qor
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qor
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qshiftn
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qshift
gvn
                
load of type i8 eliminated in favor of and 
mpd_qxor
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qxor
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qminus
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qplus
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qadd
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qsub
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qmul
gvn
                
load of type i8 eliminated in favor of or 
_mpd_qmul
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qmul_exact
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qsub_exact
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qreciprocal
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qreciprocal
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qround_to_integral
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qadd_exact
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
                
load of type i8 not eliminated because it is clobbered by store 
_mpd_base_ndivmod
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                
load of type i8 eliminated in favor of load 
_mpd_qdiv
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qdivint
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qexp_check_one
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qfma
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qexp
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qexp
gvn
                
load of type i8 eliminated in favor of 48 
mpd_qexp
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qln10
gvn
                
load of type i8 eliminated in favor of 48 
_mpd_qln
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qln
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qln
gvn
                
load of type i8 eliminated in favor of 48 
mpd_qln
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                
load of type i8 eliminated in favor of 48 
mpd_qlog10
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qmax
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qmax_mag
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qmin
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qmin_mag
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                
load of type i8 eliminated in favor of 48 
_mpd_qpow_int
gvn
                
load of type i8 eliminated in favor of 48 
_mpd_qpow_real
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qrem
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpowmod_uint
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpowmod
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_mpd_apply_round_fit
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                
load of type i8 eliminated in favor of load 
mpd_qreduce
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                
load of type i8 eliminated in favor of 48 
mpd_qrem_near
gvn
                
load of type i8 not eliminated because it is clobbered by store 
_mpd_qinvroot
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qinvroot
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qsqrt
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qsqrt
gvn
                
load of type i8 eliminated in favor of phi 
mpd_qsqrt
licm
                
hosting getelementptr 
_coeff_from_u16
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_u16
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_coeff_from_u16
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u16
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qimport_u16
gvn
                
load of type i8 eliminated in favor of or 
mpd_qimport_u16
licm
                
hosting getelementptr 
_coeff_from_smaller_base
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_smaller_base
gvn
                
load of type i8 not eliminated because it is clobbered by call 
_coeff_from_smaller_base
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u32
gvn
                
load of type i8 not eliminated because it is clobbered by call 
mpd_qimport_u32
gvn
                
load of type i8 eliminated in favor of or 
mpd_qimport_u32
420
}
421
422
/* Data of decimal is shared */
423
ALWAYS_INLINE int
424
mpd_isshared_data(const mpd_t *dec)
425
{
426
    return dec->flags & MPD_SHARED_DATA;
427
}
428
429
/* Data of decimal is const */
430
ALWAYS_INLINE int
431
mpd_isconst_data(const mpd_t *dec)
432
{
433
    return dec->flags & MPD_CONST_DATA;
434
}
435
436
437
/******************************************************************************/
438
/*                         Inline memory handling                             */
439
/******************************************************************************/
440
441
/* Fill destination with zeros */
442
ALWAYS_INLINE void
443
mpd_uint_zero(mpd_uint_t *dest, mpd_size_t len)
444
{
445
    mpd_size_t i;
446
447
    for (i = 0; i < len; i++) {
448
        dest[i] = 0;
449
    }
450
}
451
452
/* Free a decimal */
453
ALWAYS_INLINE void
454
mpd_del(mpd_t *dec)
455
{
456
    if (mpd_isdynamic_data(dec)) {
inline
        
mpd_isdynamic_data should always be inlined (cost=always) 
mpd_del
inline
        
mpd_isdynamic_data inlined into mpd_del 
mpd_del
457
        mpd_free(dec->data);
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qrotate
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qadd_ssize
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qadd_ssize
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qadd_uint
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qadd_uint
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qsub_ssize
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qsub_ssize
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qsub_uint
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qsub_uint
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qadd_u32
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qadd_u32
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qadd_u64
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qadd_u64
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qsub_u32
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qsub_u32
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qsub_u64
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qsub_u64
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qreciprocal
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_mpd_qreciprocal
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                      
load of type i8* eliminated in favor of inttoptr 
_mpd_qdiv
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qdivint
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qdivint
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qdiv_ssize
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qdiv_ssize
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qdiv_uint
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qdiv_uint
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qdiv_u32
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qdiv_u32
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qdiv_u64
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qdiv_u64
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qfma
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qfma
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qexp
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_mpd_qexp
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qexp
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qexp
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qln10
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qln10
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qmul_ssize
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qmul_ssize
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qln
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_mpd_qln
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qln
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qln
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qlog10
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_mpd_qlog10
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qlog10
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qmul_uint
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qmul_uint
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_lower_bound_zeta
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_lower_bound_zeta
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qpow
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qpow
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qrem
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qrem
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qinvroot
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qinvroot
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qsqrt
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qsqrt
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by store 
mpd_qexport_u16
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qexport_u16
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by store 
mpd_qexport_u32
gvn
                      
load of type i8* not eliminated because it is clobbered by call 
mpd_qexport_u32
458
    }
459
    if (mpd_isdynamic(dec)) {
inline
        
mpd_isdynamic should always be inlined (cost=always) 
mpd_del
inline
        
mpd_isdynamic inlined into mpd_del 
mpd_del
460
        mpd_free(dec);
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_del
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qrotate
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qadd_ssize
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qadd_uint
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qsub_ssize
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qsub_uint
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qadd_u32
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qadd_u64
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qsub_u32
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qsub_u64
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qreciprocal
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qdivint
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qdiv_ssize
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qdiv_uint
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qdiv_u32
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qdiv_u64
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qfma
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qexp
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qexp
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qln10
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qmul_ssize
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qln
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qln
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qlog10
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qlog10
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qmul_uint
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_lower_bound_zeta
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qpow
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qrem
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qinvroot
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qsqrt
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by store 
mpd_qexport_u16
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by store 
mpd_qexport_u32
461
    }
462
}
463
464
/*
465
 * Resize the coefficient. Existing data up to 'nwords' is left untouched.
466
 * Return 1 on success, 0 otherwise.
467
 *
468
 * Input invariant: MPD_MINALLOC <= result->alloc.
469
 *
470
 * Case nwords == result->alloc:
471
 *     'result' is unchanged. Return 1.
472
 *
473
 * Case nwords > result->alloc:
474
 *   Case realloc success:
475
 *     The value of 'result' does not change. Return 1.
476
 *   Case realloc failure:
477
 *     'result' is NaN, status is updated with MPD_Malloc_error. Return 0.
478
 *
479
 * Case nwords < result->alloc:
480
 *   Case is_static_data or realloc failure [1]:
481
 *     'result' is unchanged. Return 1.
482
 *   Case realloc success:
483
 *     The value of result is undefined (expected). Return 1.
484
 *
485
 *
486
 * [1] In that case the old (now oversized) area is still valid.
487
 */
488
ALWAYS_INLINE int
489
mpd_qresize(mpd_t *result, mpd_ssize_t nwords, uint32_t *status)
490
{
491
    assert(!mpd_isconst_data(result)); /* illegal operation for a const */
492
    assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
493
    assert(MPD_MINALLOC <= result->alloc);
494
495
    nwords = (nwords <= MPD_MINALLOC) ? MPD_MINALLOC : nwords;
gvn
                        
load of type i64 not eliminated because it is clobbered by store 
_mpd_fix_nan
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftr_inplace
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_apply_round_excess
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                        
load of type i64 not eliminated because it is clobbered by store 
_mpd_cap
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qand
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qinvert
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qor
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftn
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qshift
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qxor
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qminus
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qplus
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qmul
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qround_to_integral
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qexp
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qln
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax_mag
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin_mag
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpowmod_uint
licm
                        
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpowmod
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_apply_round_fit
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qsqrt
licm
                        
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_u16
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_coeff_from_u16
licm
                        
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u16
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qimport_u16
licm
                        
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_smaller_base
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_coeff_from_smaller_base
licm
                        
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u32
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qimport_u32
496
    if (nwords == result->alloc) {
gvn
                          
load of type i64 not eliminated because it is clobbered by store 
_mpd_fix_nan
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftr_inplace
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_apply_round_excess
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                          
load of type i64 not eliminated because it is clobbered by store 
_mpd_cap
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qand
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qinvert
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qor
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftn
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qshift
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qxor
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qminus
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qplus
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qmul
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qround_to_integral
gvn
                          
load of type i64 not eliminated because it is clobbered by store 
_mpd_base_ndivmod
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qexp
gvn
                          
load of type i64 eliminated in favor of 64 
mpd_qexp
gvn
                          
load of type i64 eliminated in favor of 64 
_mpd_qln
gvn
                          
load of type i64 eliminated in favor of 64 
mpd_qln
gvn
                          
load of type i64 eliminated in favor of 64 
mpd_qlog10
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax_mag
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin_mag
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
                          
load of type i64 eliminated in favor of 64 
_mpd_qpow_int
gvn
                          
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_int
gvn
                          
load of type i64 eliminated in favor of 64 
_mpd_qpow_real
gvn
                          
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qpow
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpowmod_uint
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpowmod
gvn
                          
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qpowmod
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_apply_round_fit
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                          
load of type i64 eliminated in favor of 64 
mpd_qrem_near
gvn
                          
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qsqrt
licm
                          
hosting getelementptr 
_coeff_from_u16
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_u16
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_coeff_from_u16
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u16
gvn
                          
load of type i64 not eliminated because it is clobbered by store 
mpd_qimport_u16
licm
                          
hosting getelementptr 
_coeff_from_smaller_base
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_smaller_base
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_coeff_from_smaller_base
licm
                          
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u32
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
mpd_qimport_u32
497
        return 1;
498
    }
499
    if (mpd_isstatic_data(result)) {
inline
        
mpd_isstatic_data should always be inlined (cost=always) 
mpd_qresize
inline
        
mpd_isstatic_data inlined into mpd_qresize 
mpd_qresize
loop-vectorize
        
loop not vectorized: control flow cannot be substituted for a select 
mpd_qpowmod
500
        if (nwords > result->alloc) {
501
            return mpd_switch_to_dyn(result, nwords, status);
inline
                   
mpd_switch_to_dyn will not be inlined into mpd_qresize because its definition is unavailable 
mpd_qresize
502
        }
503
        return 1;
504
    }
505
506
    return mpd_realloc_dyn(result, nwords, status);
inline
           
mpd_realloc_dyn will not be inlined into mpd_qresize because its definition is unavailable 
mpd_qresize
507
}
508
509
/* Same as mpd_qresize, but the complete coefficient (including the old
510
 * memory area!) is initialized to zero. */
511
ALWAYS_INLINE int
512
mpd_qresize_zero(mpd_t *result, mpd_ssize_t nwords, uint32_t *status)
513
{
514
    assert(!mpd_isconst_data(result)); /* illegal operation for a const */
515
    assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
516
    assert(MPD_MINALLOC <= result->alloc);
517
518
    nwords = (nwords <= MPD_MINALLOC) ? MPD_MINALLOC : nwords;
519
    if (nwords != result->alloc) {
520
        if (mpd_isstatic_data(result)) {
inline
            
mpd_isstatic_data should always be inlined (cost=always) 
mpd_qresize_zero
inline
            
mpd_isstatic_data inlined into mpd_qresize_zero 
mpd_qresize_zero
521
            if (nwords > result->alloc) {
522
                return mpd_switch_to_dyn_zero(result, nwords, status);
inline
                       
mpd_switch_to_dyn_zero will not be inlined into mpd_qresize_zero because its definition is unavailable 
mpd_qresize_zero
523
            }
524
        }
525
        else if (!mpd_realloc_dyn(result, nwords, status)) {
inline
                  
mpd_realloc_dyn will not be inlined into mpd_qresize_zero because its definition is unavailable 
mpd_qresize_zero
526
            return 0;
527
        }
528
    }
529
530
    mpd_uint_zero(result->data, nwords);
inline
    
mpd_uint_zero should always be inlined (cost=always) 
mpd_qresize_zero
inline
    
mpd_uint_zero inlined into mpd_qresize_zero 
mpd_qresize_zero
gvn
                          
load of type i8* not eliminated because it is clobbered by call 
mpd_qresize_zero
gvn
                          
load of type i8* not eliminated because it is clobbered by call 
mpd_resize_zero
531
    return 1;
532
}
533
534
/*
535
 * Reduce memory size for the coefficient to MPD_MINALLOC. In theory,
536
 * realloc may fail even when reducing the memory size. But in that case
537
 * the old memory area is always big enough, so checking for MPD_Malloc_error
538
 * is not imperative.
539
 */
540
ALWAYS_INLINE void
541
mpd_minalloc(mpd_t *result)
542
{
543
    assert(!mpd_isconst_data(result)); /* illegal operation for a const */
544
    assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
545
546
    if (!mpd_isstatic_data(result) && result->alloc > MPD_MINALLOC) {
inline
         
mpd_isstatic_data should always be inlined (cost=always) 
mpd_minalloc
inline
         
mpd_isstatic_data inlined into mpd_minalloc 
mpd_minalloc
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftr_inplace
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftr_inplace
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_check_exp
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_check_exp
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qsshiftr
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qsshiftr
gvn
                                              
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qget_uint
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qget_uint
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qcompare
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qcompare
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qcompare_signal
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qcompare_signal
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_compare_total
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_compare_total
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_compare_total_mag
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_compare_total_mag
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_cap
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_cap
gvn
                                              
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qand
gvn
                                                      
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qand
gvn
                                              
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qinvert
gvn
                                                      
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qinvert
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qlogb
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qlogb
gvn
                                              
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qor
gvn
                                                      
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qor
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qshift
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qshift
gvn
                                              
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qxor
gvn
                                                      
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qxor
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qadd
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qadd
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qsub
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qsub
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qmul
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qmul
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qmul_exact
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qmul_exact
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qsub_exact
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qsub_exact
licm
                                              
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qreciprocal
licm
                                                      
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qreciprocal
gvn
                                              
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qreciprocal
gvn
                                                      
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qreciprocal
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qadd_exact
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qadd_exact
licm
                                              
hosting getelementptr 
_mpd_base_ndivmod
licm
                                              
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
licm
                                                      
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
                                              
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_base_ndivmod
gvn
                                                      
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_base_ndivmod
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qdivint
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qdivint
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qexp_check_one
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qexp_check_one
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qfma
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qfma
gvn
                                              
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qexp
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qexp
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qexp
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qexp
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qln10
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qln10
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qln
gvn
                                                      
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qln
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qln
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qln
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                                                      
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qpow_real
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                                              
load of type i64 not eliminated because it is clobbered by store 
_mpd_apply_round_fit
gvn
                                                      
load of type i64 not eliminated because it is clobbered by store 
_mpd_apply_round_fit
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qinvroot
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qinvroot
licm
                                              
hosting getelementptr 
mpd_qsqrt
licm
                                              
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qsqrt
licm
                                                      
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qsqrt
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qsqrt
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qsqrt
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qimport_u16
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qimport_u16
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qimport_u32
gvn
                                                      
load of type i64 not eliminated because it is clobbered by call 
mpd_qimport_u32
loop-vectorize
                                   
loop not vectorized: control flow cannot be substituted for a select 
_mpd_base_ndivmod
547
        uint8_t err = 0;
548
        result->data = mpd_realloc(result->data, MPD_MINALLOC,
inline
                       
mpd_realloc will not be inlined into mpd_minalloc because its definition is unavailable 
mpd_minalloc
gvn
                                                 
load of type i64 eliminated in favor of load 
mpd_minalloc
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qshiftr_inplace
gvn
                                           
load of type i8* eliminated in favor of inttoptr 
_mpd_check_exp
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qsshiftr
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qget_uint
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qcompare
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qcompare_signal
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_compare_total
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_compare_total_mag
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                                           
load of type i8* eliminated in favor of inttoptr 
_mpd_cap
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qand
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qinvert
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qlogb
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qor
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qshift
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qxor
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qadd
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qsub
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qmul
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qmul_exact
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qsub_exact
licm
                                           
hosting bitcast 
_mpd_qreciprocal
licm
                                           
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qreciprocal
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qreciprocal
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qadd_exact
licm
                                           
hosting getelementptr 
_mpd_base_ndivmod
licm
                                           
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qdivint
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qexp_check_one
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qfma
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qexp
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qexp
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qln10
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qln
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qln
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qpow
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qrem
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                                           
load of type i8* eliminated in favor of phi 
_mpd_apply_round_fit
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                                           
load of type i8* eliminated in favor of inttoptr 
mpd_qreduce
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qinvroot
licm
                                           
hosting getelementptr 
mpd_qsqrt
licm
                                           
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qsqrt
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qsqrt
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qimport_u16
gvn
                                           
load of type i8* not eliminated because it is clobbered by call 
mpd_qimport_u32
549
                                   sizeof *result->data, &err);
550
        if (!err) {
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_minalloc
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_zerocoeff
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_setspecial
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_seterror
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_fix_nan
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qshiftr_inplace
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_check_exp
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qset_ssize
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_settriple
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qset_uint
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qset_i32
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qset_i64
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qsshiftr
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qget_uint
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qcompare
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qcompare_signal
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_compare_total
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_compare_total_mag
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qshiftr
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_cap
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qand
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qinvert
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qlogb
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qor
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qaddsub
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qrotate
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qscaleb
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qshiftn
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qshift
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qxor
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qadd
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qsub
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qdiv_inf
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qmul_inf
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qmul
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qmul_exact
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qsub_exact
licm
             
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qreciprocal
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qreciprocal
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qround_to_integral
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qadd_exact
licm
             
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_base_ndivmod
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qdiv
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qdivmod
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qdivmod
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qdivint
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qexp_check_one
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qfma
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_uint
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qexp
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qexp
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qln10
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qln
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qln
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qlog10
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_qcheck_pow_one
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_qcheck_pow_bounds
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_mpd
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_int
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_real
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qpow
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qrescale
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qrescale
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qrem
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qpowmod
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_apply_round_fit
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qquantize
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qreduce
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qrem_near
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qrescale_fmt
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_invroot_init_approx
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qinvroot
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qinvroot
licm
             
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qsqrt
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qsqrt
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qimport_u16
gvn
             
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qimport_u32
551
            result->alloc = MPD_MINALLOC;
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_minalloc
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_zerocoeff
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_setspecial
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_seterror
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_fix_nan
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qshiftr_inplace
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_check_exp
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qset_ssize
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_settriple
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qset_uint
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qset_i32
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qset_i64
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qsshiftr
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qget_uint
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qcompare
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qcompare_signal
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_compare_total
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_compare_total_mag
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qshiftr
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_cap
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qand
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qinvert
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qlogb
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qor
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qaddsub
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qrotate
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qscaleb
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qshiftn
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qshift
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qxor
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qadd
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qsub
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qdiv_inf
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qmul_inf
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qmul
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qmul_exact
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qsub_exact
licm
                            
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qreciprocal
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
_mpd_qreciprocal
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qround_to_integral
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qadd_exact
licm
                            
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qdiv
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qdivmod
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qdivmod
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qdivint
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qexp_check_one
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qfma
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qpow_uint
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qexp
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qexp
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qln10
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
_mpd_qln
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qln
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qlog10
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_qcheck_pow_one
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_qcheck_pow_bounds
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qpow_mpd
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qpow_int
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qpow
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qrescale
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qrescale
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qrem
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qpowmod
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round_fit
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qquantize
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qreduce
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qrem_near
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qrescale_fmt
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_invroot_init_approx
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qinvroot
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qinvroot
licm
                            
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qsqrt
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qsqrt
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qimport_u16
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qimport_u32
552
        }
553
    }
554
}
555
556
int
557
mpd_resize(mpd_t *result, mpd_ssize_t nwords, mpd_context_t *ctx)
558
{
559
    uint32_t status = 0;
560
    if (!mpd_qresize(result, nwords, &status)) {
inline
         
mpd_qresize should always be inlined (cost=always) 
mpd_resize
inline
         
mpd_qresize inlined into mpd_resize 
mpd_resize
561
        mpd_addstatus_raise(ctx, status);
inline
        
mpd_addstatus_raise will not be inlined into mpd_resize because its definition is unavailable 
mpd_resize
gvn
                                 
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_resize
562
        return 0;
563
    }
564
    return 1;
565
}
566
567
int
568
mpd_resize_zero(mpd_t *result, mpd_ssize_t nwords, mpd_context_t *ctx)
569
{
570
    uint32_t status = 0;
571
    if (!mpd_qresize_zero(result, nwords, &status)) {
inline
         
mpd_qresize_zero should always be inlined (cost=always) 
mpd_resize_zero
inline
         
mpd_qresize_zero inlined into mpd_resize_zero 
mpd_resize_zero
572
        mpd_addstatus_raise(ctx, status);
inline
        
mpd_addstatus_raise will not be inlined into mpd_resize_zero because its definition is unavailable 
mpd_resize_zero
gvn
                                 
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_resize_zero
573
        return 0;
574
    }
575
    return 1;
576
}
577
578
579
/******************************************************************************/
580
/*                       Set attributes of a decimal                          */
581
/******************************************************************************/
582
583
/* Set digits. Assumption: result->len is initialized and > 0. */
584
inline void
585
mpd_setdigits(mpd_t *result)
586
{
587
    mpd_ssize_t wdigits = mpd_word_digits(mpd_msword(result));
inline
                                          
mpd_msword should always be inlined (cost=always) 
mpd_setdigits
inline
                                          
mpd_msword inlined into mpd_setdigits 
mpd_setdigits
inline
                          
mpd_word_digits should always be inlined (cost=always) 
mpd_setdigits
inline
                          
mpd_word_digits inlined into mpd_setdigits 
mpd_setdigits
588
    result->digits = wdigits + (result->len-1) * MPD_RDIGITS;
gvn
                                        
load of type i64 eliminated in favor of load 
mpd_setdigits
589
}
590
591
/* Set sign */
592
ALWAYS_INLINE void
593
mpd_set_sign(mpd_t *result, uint8_t sign)
594
{
595
    result->flags &= ~MPD_NEG;
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy_sign
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_uint
gvn
                  
load eliminated by PRE 
_mpd_qpow_uint
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
                  
load eliminated by PRE 
_mpd_qpow_mpd
596
    result->flags |= sign;
597
}
598
599
/* Copy sign from another decimal */
600
ALWAYS_INLINE void
601
mpd_signcpy(mpd_t *result, const mpd_t *a)
602
{
603
    uint8_t sign = a->flags&MPD_NEG;
604
605
    result->flags &= ~MPD_NEG;
606
    result->flags |= sign;
607
}
608
609
/* Set infinity */
610
ALWAYS_INLINE void
611
mpd_set_infinity(mpd_t *result)
612
{
613
    result->flags &= ~MPD_SPECIAL;
614
    result->flags |= MPD_INF;
615
}
616
617
/* Set qNaN */
618
ALWAYS_INLINE void
619
mpd_set_qnan(mpd_t *result)
620
{
621
    result->flags &= ~MPD_SPECIAL;
gvn
                  
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_seterror
gvn
                  
load eliminated by PRE 
mpd_seterror
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcheck_nan
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcheck_nans
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcheck_3nans
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qpowmod
622
    result->flags |= MPD_NAN;
623
}
624
625
/* Set sNaN */
626
ALWAYS_INLINE void
627
mpd_set_snan(mpd_t *result)
628
{
629
    result->flags &= ~MPD_SPECIAL;
630
    result->flags |= MPD_SNAN;
631
}
632
633
/* Set to negative */
634
ALWAYS_INLINE void
635
mpd_set_negative(mpd_t *result)
636
{
637
    result->flags |= MPD_NEG;
gvn
                  
load of type i8 eliminated in favor of and 
_mpd_qln
638
}
639
640
/* Set to positive */
641
ALWAYS_INLINE void
642
mpd_set_positive(mpd_t *result)
643
{
644
    result->flags &= ~MPD_NEG;
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy_abs
gvn
                  
load of type i8 eliminated in favor of or 
mpd_cmp_total_mag
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qminus
gvn
                  
load eliminated by PRE 
mpd_qminus
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qplus
gvn
                  
load eliminated by PRE 
mpd_qplus
gvn
                  
load of type i8 eliminated in favor of or 
_mpd_base_ndivmod
gvn
                  
load of type i8 eliminated in favor of or 
_mpd_qexp_check_one
gvn
                  
load of type i8 eliminated in favor of or 
_qcheck_pow_bounds
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qpowmod
645
}
646
647
/* Set to dynamic */
648
ALWAYS_INLINE void
649
mpd_set_dynamic(mpd_t *result)
650
{
651
    result->flags &= ~MPD_STATIC;
652
}
653
654
/* Set to static */
655
ALWAYS_INLINE void
656
mpd_set_static(mpd_t *result)
657
{
658
    result->flags |= MPD_STATIC;
659
}
660
661
/* Set data to dynamic */
662
ALWAYS_INLINE void
663
mpd_set_dynamic_data(mpd_t *result)
664
{
665
    result->flags &= ~MPD_DATAFLAGS;
gvn
                  
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_qmul
gvn
                  
load eliminated by PRE 
_mpd_qmul
666
}
667
668
/* Set data to static */
669
ALWAYS_INLINE void
670
mpd_set_static_data(mpd_t *result)
671
{
672
    result->flags &= ~MPD_DATAFLAGS;
673
    result->flags |= MPD_STATIC_DATA;
674
}
675
676
/* Set data to shared */
677
ALWAYS_INLINE void
678
mpd_set_shared_data(mpd_t *result)
679
{
680
    result->flags &= ~MPD_DATAFLAGS;
gvn
                  
load of type i8 eliminated in favor of load 
_mpd_copy_shared
681
    result->flags |= MPD_SHARED_DATA;
682
}
683
684
/* Set data to const */
685
ALWAYS_INLINE void
686
mpd_set_const_data(mpd_t *result)
687
{
688
    result->flags &= ~MPD_DATAFLAGS;
689
    result->flags |= MPD_CONST_DATA;
690
}
691
692
/* Clear flags, preserving memory attributes. */
693
ALWAYS_INLINE void
694
mpd_clear_flags(mpd_t *result)
695
{
696
    result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qand
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qinvert
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qor
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qxor
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qreciprocal_approx
gvn
                  
load of type i8 eliminated in favor of or 
_mpd_qreciprocal
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qreciprocal
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                  
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_qln
gvn
                  
load eliminated by PRE 
_mpd_qln
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                  
load of type i8 not eliminated in favor of load because it is clobbered by call 
_invroot_init_approx
gvn
                  
load eliminated by PRE 
_invroot_init_approx
697
}
698
699
/* Set flags, preserving memory attributes. */
700
ALWAYS_INLINE void
701
mpd_set_flags(mpd_t *result, uint8_t flags)
702
{
703
    result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
gvn
                  
load of type i8 not eliminated in favor of load because it is clobbered by call 
_settriple
gvn
                  
load eliminated by PRE 
_settriple
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qmul
gvn
                  
load of type i8 eliminated in favor of load 
_mpd_qreciprocal
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_qcheck_pow_one_inf
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qimport_u16
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qimport_u32
704
    result->flags |= flags;
705
}
706
707
/* Copy flags, preserving memory attributes of result. */
708
ALWAYS_INLINE void
709
mpd_copy_flags(mpd_t *result, const mpd_t *a)
710
{
711
    uint8_t aflags = a->flags;
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qshiftl
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy_static
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qsshiftr
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_qget_uint
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qcheck_nan
gvn
                        
load eliminated by PRE 
mpd_qcheck_nan
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qcheck_nans
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qncopy
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy_abs
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy_negate
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy_sign
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                        
load eliminated by PRE 
mpd_qrotate
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                        
load eliminated by PRE 
mpd_qscaleb
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qshiftn
gvn
                        
load eliminated by PRE 
mpd_qshiftn
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qshift
gvn
                        
load eliminated by PRE 
mpd_qshift
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qminus
gvn
                        
load eliminated by PRE 
mpd_qminus
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qplus
gvn
                        
load eliminated by PRE 
mpd_qplus
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_qround_to_integral
gvn
                        
load eliminated by PRE 
_mpd_qround_to_integral
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_base_ndivmod
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_qdivmod
gvn
                        
load eliminated by PRE 
_mpd_qdivmod
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                        
load eliminated by PRE 
mpd_qdivmod
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qfma
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_uint
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_qexp
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qexp
gvn
                        
load eliminated by PRE 
mpd_qexp
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
_mpd_qln
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qln
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qlog10
gvn
                        
load eliminated by PRE 
mpd_qlog10
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qmax
gvn
                        
load eliminated by PRE 
mpd_qmax
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qmax_mag
gvn
                        
load eliminated by PRE 
mpd_qmax_mag
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qmin
gvn
                        
load eliminated by PRE 
mpd_qmin
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qmin_mag
gvn
                        
load eliminated by PRE 
mpd_qmin_mag
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                        
load eliminated by PRE 
mpd_qnext_minus
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                        
load eliminated by PRE 
mpd_qnext_plus
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qpow
gvn
                        
load eliminated by PRE 
mpd_qpow
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qcheck_3nans
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_qrescale
gvn
                        
load eliminated by PRE 
_mpd_qrescale
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qrem
gvn
                        
load eliminated by PRE 
mpd_qrem
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                        
load eliminated by PRE 
mpd_qquantize
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qreduce
gvn
                        
load eliminated by PRE 
mpd_qreduce
gvn
                        
load of type i8 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                        
load eliminated by PRE 
mpd_qrem_near
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
_mpd_qinvroot
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qinvroot
gvn
                        
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_qsqrt
712
    result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qshiftl
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy_static
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qsshiftr
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qget_uint
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcheck_nan
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcheck_nans
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qncopy
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy_abs
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy_negate
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy_sign
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                  
load of type i8 eliminated in favor of -112 
_mpd_qaddsub
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qshiftn
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qshift
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qminus
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qplus
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qround_to_integral
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qfma
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_uint
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qexp
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qexp
gvn
                  
load eliminated by PRE 
mpd_qexp
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qln
gvn
                  
load eliminated by PRE 
_mpd_qln
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qln
gvn
                  
load eliminated by PRE 
mpd_qln
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qlog10
gvn
                  
load eliminated by PRE 
mpd_qlog10
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qmax
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qmax_mag
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qmin
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qmin_mag
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_int
gvn
                  
load eliminated by PRE 
_mpd_qpow_int
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_real
gvn
                  
load eliminated by PRE 
_mpd_qpow_real
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qpow
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qcheck_3nans
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qrescale
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qrem
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qpowmod_uint
licm
                  
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpowmod
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                  
load eliminated by PRE 
mpd_qrem_near
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
                  
load of type i8 not eliminated because it is clobbered by call 
mpd_qinvroot
gvn
                  
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qsqrt
713
    result->flags |= (aflags & ~(MPD_STATIC|MPD_DATAFLAGS));
714
}
715
716
/* Initialize a workcontext from ctx. Set traps, flags and newtrap to 0. */
717
static inline void
718
mpd_workcontext(mpd_context_t *workctx, const mpd_context_t *ctx)
719
{
720
    workctx->prec = ctx->prec;
gvn
                         
load of type i64 eliminated in favor of load 
mpd_qnext_minus
gvn
                         
load of type i64 eliminated in favor of load 
mpd_qnext_plus
gvn
                         
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
721
    workctx->emax = ctx->emax;
gvn
                         
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
722
    workctx->emin = ctx->emin;
gvn
                         
load of type i64 eliminated in favor of load 
mpd_qnext_minus
gvn
                         
load of type i64 eliminated in favor of load 
mpd_qnext_plus
gvn
                         
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
723
    workctx->round = ctx->round;
724
    workctx->traps = 0;
725
    workctx->status = 0;
726
    workctx->newtrap = 0;
727
    workctx->clamp = ctx->clamp;
728
    workctx->allcr = ctx->allcr;
gvn
                          
load of type i32 not eliminated because it is clobbered by call 
mpd_qpow
729
}
730
731
732
/******************************************************************************/
733
/*                  Getting and setting parts of decimals                     */
734
/******************************************************************************/
735
736
/* Flip the sign of a decimal */
737
static inline void
738
_mpd_negate(mpd_t *dec)
739
{
740
    dec->flags ^= MPD_NEG;
gvn
               
load of type i8 not eliminated because it is clobbered by call 
mpd_qcopy_negate
gvn
               
load of type i8 not eliminated because it is clobbered by call 
mpd_qminus
gvn
               
load eliminated by PRE 
mpd_qminus
741
}
742
743
/* Set coefficient to zero */
744
void
745
mpd_zerocoeff(mpd_t *result)
746
{
747
    mpd_minalloc(result);
inline
    
mpd_minalloc should always be inlined (cost=always) 
mpd_zerocoeff
inline
    
mpd_minalloc inlined into mpd_zerocoeff 
mpd_zerocoeff
748
    result->digits = 1;
749
    result->len = 1;
750
    result->data[0] = 0;
gvn
            
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qshiftr_inplace
gvn
            
load of type i64* eliminated in favor of phi 
_mpd_check_exp
gvn
            
load of type i64* not eliminated because it is clobbered by call 
mpd_qsshiftr
gvn
            
load of type i64* not eliminated in favor of store because it is clobbered by call 
_mpd_qget_uint
gvn
            
load of type i64* not eliminated because it is clobbered by call 
mpd_qshiftr
751
}
752
753
/* Set the coefficient to all nines. */
754
void
755
mpd_qmaxcoeff(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
756
{
757
    mpd_ssize_t len, r;
758
759
    _mpd_idiv_word(&len, &r, ctx->prec, MPD_RDIGITS);
inline
    
_mpd_idiv_word can be inlined into mpd_qmaxcoeff with cost=-30 (threshold=487) 
mpd_qmaxcoeff
inline
    
_mpd_idiv_word inlined into mpd_qmaxcoeff 
mpd_qmaxcoeff
760
    len = (r == 0) ? len : len+1;
761
762
    if (!mpd_qresize(result, len, status)) {
inline
         
mpd_qresize should always be inlined (cost=always) 
mpd_qmaxcoeff
inline
         
mpd_qresize inlined into mpd_qmaxcoeff 
mpd_qmaxcoeff
763
        return;
764
    }
765
766
    result->len = len;
767
    result->digits = ctx->prec;
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qmaxcoeff
gvn
                          
load eliminated by PRE 
mpd_qmaxcoeff
768
769
    --len;
770
    if (r > 0) {
771
        result->data[len--] = mpd_pow10[r]-1;
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qmaxcoeff
772
    }
773
    for (; len >= 0; --len) {
loop-vectorize
    
vectorized loop (vectorization width: 2, interleaved count: 2) 
mpd_qmaxcoeff
loop-unroll
    
unrolled loop by a factor of 4 with run-time trip count 
mpd_qmaxcoeff
774
        result->data[len] = MPD_RADIX-1;
licm
                
hosting getelementptr 
mpd_qmaxcoeff
775
    }
776
}
777
778
/*
779
 * Cut off the most significant digits so that the rest fits in ctx->prec.
780
 * Cannot fail.
781
 */
782
static void
783
_mpd_cap(mpd_t *result, const mpd_context_t *ctx)
784
{
785
    uint32_t dummy;
786
    mpd_ssize_t len, r;
787
788
    if (result->len > 0 && result->digits > ctx->prec) {
789
        _mpd_idiv_word(&len, &r, ctx->prec, MPD_RDIGITS);
inline
        
_mpd_idiv_word can be inlined into _mpd_cap with cost=-30 (threshold=487) 
_mpd_cap
inline
        
_mpd_idiv_word inlined into _mpd_cap 
_mpd_cap
790
        len = (r == 0) ? len : len+1;
791
792
        if (r != 0) {
793
            result->data[len-1] %= mpd_pow10[r];
794
        }
795
796
        len = _mpd_real_size(result->data, len);
inline
              
_mpd_real_size can be inlined into _mpd_cap with cost=-5 (threshold=325) 
_mpd_cap
inline
              
_mpd_real_size inlined into _mpd_cap 
_mpd_cap
797
        /* resize to fewer words cannot fail */
798
        mpd_qresize(result, len, &dummy);
inline
        
mpd_qresize should always be inlined (cost=always) 
_mpd_cap
inline
        
mpd_qresize inlined into _mpd_cap 
_mpd_cap
799
        result->len = len;
800
        mpd_setdigits(result);
inline
        
mpd_setdigits can be inlined into _mpd_cap with cost=295 (threshold=325) 
_mpd_cap
inline
        
mpd_setdigits inlined into _mpd_cap 
_mpd_cap
801
    }
802
    if (mpd_iszero(result)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
_mpd_cap
inline
        
mpd_iszero inlined into _mpd_cap 
_mpd_cap
803
        _settriple(result, mpd_sign(result), 0, result->exp);
inline
        
_settriple can be inlined into _mpd_cap with cost=180 (threshold=250) 
_mpd_cap
inline
        
_settriple inlined into _mpd_cap 
_mpd_cap
inline
                           
mpd_sign should always be inlined (cost=always) 
_mpd_cap
inline
                           
mpd_sign inlined into _mpd_cap 
_mpd_cap
gvn
                                                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_cap
804
    }
805
}
806
807
/*
808
 * Cut off the most significant digits of a NaN payload so that the rest
809
 * fits in ctx->prec - ctx->clamp. Cannot fail.
810
 */
811
static void
812
_mpd_fix_nan(mpd_t *result, const mpd_context_t *ctx)
813
{
814
    uint32_t dummy;
815
    mpd_ssize_t prec;
816
    mpd_ssize_t len, r;
817
818
    prec = ctx->prec - ctx->clamp;
819
    if (result->len > 0 && result->digits > prec) {
820
        if (prec == 0) {
821
            mpd_minalloc(result);
inline
            
mpd_minalloc should always be inlined (cost=always) 
_mpd_fix_nan
inline
            
mpd_minalloc inlined into _mpd_fix_nan 
_mpd_fix_nan
822
            result->len = result->digits = 0;
823
        }
824
        else {
825
            _mpd_idiv_word(&len, &r, prec, MPD_RDIGITS);
inline
            
_mpd_idiv_word can be inlined into _mpd_fix_nan with cost=-30 (threshold=487) 
_mpd_fix_nan
inline
            
_mpd_idiv_word inlined into _mpd_fix_nan 
_mpd_fix_nan
826
            len = (r == 0) ? len : len+1;
827
828
            if (r != 0) {
829
                 result->data[len-1] %= mpd_pow10[r];
830
            }
831
832
            len = _mpd_real_size(result->data, len);
inline
                  
_mpd_real_size can be inlined into _mpd_fix_nan with cost=-5 (threshold=325) 
_mpd_fix_nan
inline
                  
_mpd_real_size inlined into _mpd_fix_nan 
_mpd_fix_nan
833
            /* resize to fewer words cannot fail */
834
            mpd_qresize(result, len, &dummy);
inline
            
mpd_qresize should always be inlined (cost=always) 
_mpd_fix_nan
inline
            
mpd_qresize inlined into _mpd_fix_nan 
_mpd_fix_nan
835
            result->len = len;
836
            mpd_setdigits(result);
inline
            
mpd_setdigits can be inlined into _mpd_fix_nan with cost=295 (threshold=325) 
_mpd_fix_nan
inline
            
mpd_setdigits inlined into _mpd_fix_nan 
_mpd_fix_nan
837
            if (mpd_iszerocoeff(result)) {
inline
                
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_fix_nan
inline
                
mpd_iszerocoeff inlined into _mpd_fix_nan 
_mpd_fix_nan
838
                /* NaN0 is not a valid representation */
839
                result->len = result->digits = 0;
840
            }
841
        }
842
    }
843
}
844
845
/*
846
 * Get n most significant digits from a decimal, where 0 < n <= MPD_UINT_DIGITS.
847
 * Assumes MPD_UINT_DIGITS == MPD_RDIGITS+1, which is true for 32 and 64 bit
848
 * machines.
849
 *
850
 * The result of the operation will be in lo. If the operation is impossible,
851
 * hi will be nonzero. This is used to indicate an error.
852
 */
853
static inline void
854
_mpd_get_msdigits(mpd_uint_t *hi, mpd_uint_t *lo, const mpd_t *dec,
855
                  unsigned int n)
856
{
857
    mpd_uint_t r, tmp;
858
859
    assert(0 < n && n <= MPD_RDIGITS+1);
860
861
    _mpd_div_word(&tmp, &r, dec->digits, MPD_RDIGITS);
inline
    
_mpd_div_word can be inlined into _mpd_get_msdigits with cost=-35 (threshold=487) 
_mpd_get_msdigits
inline
    
_mpd_div_word inlined into _mpd_get_msdigits 
_mpd_get_msdigits
gvn
                                 
load of type i64 not eliminated because it is clobbered by store 
_mpd_qget_uint
gvn
                                 
load eliminated by PRE 
_mpd_qget_uint
gvn
                                 
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qln
gvn
                                 
load eliminated by PRE 
_mpd_qln
gvn
                                 
load of type i64 eliminated in favor of phi 
_mpd_qinvroot
862
    r = (r == 0) ? MPD_RDIGITS : r; /* digits in the most significant word */
863
864
    *hi = 0;
865
    *lo = dec->data[dec->len-1];
gvn
                         
load of type i64 not eliminated because it is clobbered by store 
_mpd_get_msdigits
gvn
          
load of type i64 not eliminated because it is clobbered by store 
_mpd_get_msdigits
gvn
               
load of type i64* not eliminated because it is clobbered by call 
_mpd_qget_uint
gvn
               
load eliminated by PRE 
_mpd_qget_uint
gvn
                         
load of type i64 not eliminated because it is clobbered by store 
_mpd_qget_uint
gvn
                         
load eliminated by PRE 
_mpd_qget_uint
gvn
          
load of type i64 not eliminated because it is clobbered by store 
_mpd_qreciprocal
gvn
          
load of type i64 not eliminated because it is clobbered by store 
_mpd_base_ndivmod
gvn
               
load of type i64* not eliminated in favor of store because it is clobbered by call 
_mpd_qln
gvn
               
load eliminated by PRE 
_mpd_qln
gvn
                         
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qln
gvn
                         
load eliminated by PRE 
_mpd_qln
gvn
               
load of type i64* not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
                         
load of type i64 eliminated in favor of phi 
_mpd_qinvroot
gvn
          
load of type i64 not eliminated because it is clobbered by store 
_mpd_qinvroot
gvn
               
load of type i64* not eliminated because it is clobbered by call 
mpd_qinvroot
gvn
          
load of type i64 not eliminated because it is clobbered by store 
mpd_qinvroot
866
    if (n <= r) {
867
        *lo /= mpd_pow10[r-n];
868
    }
869
    else if (dec->len > 1) {
gvn
                  
load of type i64 not eliminated in favor of load because it is clobbered by store 
_mpd_get_msdigits
870
        /* at this point 1 <= r < n <= MPD_RDIGITS+1 */
871
        _mpd_mul_words(hi, lo, *lo, mpd_pow10[n-r]);
inline
        
_mpd_mul_words can be inlined into _mpd_get_msdigits with cost=-15 (threshold=487) 
_mpd_get_msdigits
inline
        
_mpd_mul_words inlined into _mpd_get_msdigits 
_mpd_get_msdigits
872
        tmp = dec->data[dec->len-2] / mpd_pow10[MPD_RDIGITS-(n-r)];
gvn
                   
load of type i64* eliminated in favor of load 
_mpd_get_msdigits
gvn
                             
load of type i64 not eliminated because it is clobbered by store 
_mpd_get_msdigits
gvn
              
load of type i64 not eliminated because it is clobbered by store 
_mpd_get_msdigits
gvn
              
load of type i64 not eliminated because it is clobbered by store 
_mpd_qreciprocal
gvn
              
load of type i64 not eliminated because it is clobbered by store 
_mpd_base_ndivmod
gvn
              
load of type i64 not eliminated because it is clobbered by store 
_mpd_qinvroot
gvn
              
load of type i64 not eliminated because it is clobbered by store 
mpd_qinvroot
873
        *lo = *lo + tmp;
874
        if (*lo < tmp) (*hi)++;
gvn
                            
load of type i64 not eliminated because it is clobbered by store 
_mpd_get_msdigits
875
    }
876
}
877
878
879
/******************************************************************************/
880
/*                   Gathering information about a decimal                    */
881
/******************************************************************************/
882
883
/* The real size of the coefficient without leading zero words. */
884
static inline mpd_ssize_t
885
_mpd_real_size(mpd_uint_t *data, mpd_ssize_t size)
886
{
887
    while (size > 1 && data[size-1] == 0) {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_fix_nan
loop-vectorize
    
loop not vectorized 
_mpd_fix_nan
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qand
loop-vectorize
    
loop not vectorized 
mpd_qand
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_cap
loop-vectorize
    
loop not vectorized 
_mpd_cap
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qinvert
loop-vectorize
    
loop not vectorized 
mpd_qinvert
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qor
loop-vectorize
    
loop not vectorized 
mpd_qor
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qxor
loop-vectorize
    
loop not vectorized 
mpd_qxor
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_qaddsub
loop-vectorize
    
loop not vectorized 
_mpd_qaddsub
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_qdiv
loop-vectorize
    
loop not vectorized 
_mpd_qdiv
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_qdivmod
loop-vectorize
    
loop not vectorized 
_mpd_qdivmod
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_qmul
loop-vectorize
    
loop not vectorized 
_mpd_qmul
888
        size--;
889
    }
890
891
    return size;
892
}
893
894
/* Return number of trailing zeros. No errors are possible. */
895
mpd_ssize_t
896
mpd_trail_zeros(const mpd_t *dec)
897
{
898
    mpd_uint_t word;
899
    mpd_ssize_t i, tz = 0;
900
901
    for (i=0; i < dec->len; ++i) {
licm
                       
failed to hoist load with loop-invariant address because load is conditionally executed 
mpd_trail_zeros
gvn
                       
load of type i64 eliminated in favor of phi 
mpd_trail_zeros
gvn
                       
load of type i64 eliminated in favor of phi 
_mpd_qdiv
gvn
                       
load of type i64 eliminated in favor of load 
mpd_qreduce
gvn
                       
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qinvroot
gvn
                       
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qinvroot
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_trail_zeros
loop-vectorize
    
loop not vectorized 
mpd_trail_zeros
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_isinteger
loop-vectorize
    
loop not vectorized 
mpd_isinteger
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_qget_uint
loop-vectorize
    
loop not vectorized 
_mpd_qget_uint
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_qdiv
loop-vectorize
    
loop not vectorized 
_mpd_qdiv
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qpow
loop-vectorize
    
loop not vectorized 
mpd_qpow
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qpowmod
loop-vectorize
    
loop not vectorized 
mpd_qpowmod
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qreduce
loop-vectorize
    
loop not vectorized 
mpd_qreduce
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qinvroot
loop-vectorize
    
loop not vectorized 
mpd_qinvroot
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qexport_u16
loop-vectorize
    
loop not vectorized 
mpd_qexport_u16
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qexport_u32
loop-vectorize
    
loop not vectorized 
mpd_qexport_u32
902
        if (dec->data[i] != 0) {
licm
                 
hosting getelementptr 
mpd_trail_zeros
gvn
            
load of type i64 not eliminated because it is clobbered by store 
_mpd_qdiv
903
            word = dec->data[i];
904
            tz = i * MPD_RDIGITS;
905
            while (word % 10 == 0) {
loop-vectorize
            
loop not vectorized: could not determine number of loop iterations 
mpd_trail_zeros
loop-vectorize
            
loop not vectorized 
mpd_trail_zeros
loop-vectorize
            
loop not vectorized: could not determine number of loop iterations 
mpd_isinteger
loop-vectorize
            
loop not vectorized 
mpd_isinteger
loop-vectorize
            
loop not vectorized: could not determine number of loop iterations 
_mpd_qget_uint
loop-vectorize
            
loop not vectorized 
_mpd_qget_uint
loop-vectorize
            
loop not vectorized: could not determine number of loop iterations 
_mpd_qdiv
loop-vectorize
            
loop not vectorized 
_mpd_qdiv
loop-vectorize
            
loop not vectorized: could not determine number of loop iterations 
mpd_qpow
loop-vectorize
            
loop not vectorized 
mpd_qpow
loop-vectorize
            
loop not vectorized: could not determine number of loop iterations 
mpd_qpowmod
loop-vectorize
            
loop not vectorized 
mpd_qpowmod
loop-vectorize
            
loop not vectorized: could not determine number of loop iterations 
mpd_qreduce
loop-vectorize
            
loop not vectorized 
mpd_qreduce
loop-vectorize
            
loop not vectorized: could not determine number of loop iterations 
mpd_qinvroot
loop-vectorize
            
loop not vectorized 
mpd_qinvroot
loop-vectorize
            
loop not vectorized: could not determine number of loop iterations 
mpd_qexport_u16
loop-vectorize
            
loop not vectorized 
mpd_qexport_u16
loop-vectorize
            
loop not vectorized: could not determine number of loop iterations 
mpd_qexport_u32
loop-vectorize
            
loop not vectorized 
mpd_qexport_u32
906
                word /= 10;
907
                tz++;
908
            }
909
            break;
910
        }
911
    }
912
913
    return tz;
914
}
915
916
/* Integer: Undefined for specials */
917
static int
918
_mpd_isint(const mpd_t *dec)
919
{
920
    mpd_ssize_t tz;
921
922
    if (mpd_iszerocoeff(dec)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_isint
inline
        
mpd_iszerocoeff inlined into _mpd_isint 
_mpd_isint
923
        return 1;
924
    }
925
926
    tz = mpd_trail_zeros(dec);
inline
         
mpd_trail_zeros can be inlined into _mpd_isint with cost=65 (threshold=250) 
_mpd_isint
inline
         
mpd_trail_zeros inlined into _mpd_isint 
_mpd_isint
927
    return (dec->exp + tz >= 0);
gvn
                 
load of type i64 eliminated in favor of load 
_mpd_qget_uint
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
928
}
929
930
/* Integer */
931
int
932
mpd_isinteger(const mpd_t *dec)
933
{
934
    if (mpd_isspecial(dec)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_isinteger
inline
        
mpd_isspecial inlined into mpd_isinteger 
mpd_isinteger
935
        return 0;
936
    }
937
    return _mpd_isint(dec);
inline
           
_mpd_isint can be inlined into mpd_isinteger with cost=110 (threshold=250) 
mpd_isinteger
inline
           
_mpd_isint inlined into mpd_isinteger 
mpd_isinteger
938
}
939
940
/* Word is a power of 10 */
941
static int
942
mpd_word_ispow10(mpd_uint_t word)
943
{
944
    int n;
945
946
    n = mpd_word_digits(word);
inline
        
mpd_word_digits should always be inlined (cost=always) 
mpd_word_ispow10
inline
        
mpd_word_digits inlined into mpd_word_ispow10 
mpd_word_ispow10
947
    if (word == mpd_pow10[n-1]) {
948
        return 1;
949
    }
950
951
    return 0;
952
}
953
954
/* Coefficient is a power of 10 */
955
static int
956
mpd_coeff_ispow10(const mpd_t *dec)
957
{
958
    if (mpd_word_ispow10(mpd_msword(dec))) {
inline
                         
mpd_msword should always be inlined (cost=always) 
mpd_coeff_ispow10
inline
                         
mpd_msword inlined into mpd_coeff_ispow10 
mpd_coeff_ispow10
inline
        
mpd_word_ispow10 can be inlined into mpd_coeff_ispow10 with cost=-14730 (threshold=250) 
mpd_coeff_ispow10
inline
        
mpd_word_ispow10 inlined into mpd_coeff_ispow10 
mpd_coeff_ispow10
959
        if (_mpd_isallzero(dec->data, dec->len-1)) {
inline
            
_mpd_isallzero can be inlined into mpd_coeff_ispow10 with cost=-15005 (threshold=325) 
mpd_coeff_ispow10
inline
            
_mpd_isallzero inlined into mpd_coeff_ispow10 
mpd_coeff_ispow10
960
            return 1;
961
        }
962
    }
963
964
    return 0;
965
}
966
967
/* All digits of a word are nines */
968
static int
969
mpd_word_isallnine(mpd_uint_t word)
970
{
971
    int n;
972
973
    n = mpd_word_digits(word);
inline
        
mpd_word_digits should always be inlined (cost=always) 
mpd_word_isallnine
inline
        
mpd_word_digits inlined into mpd_word_isallnine 
mpd_word_isallnine
974
    if (word == mpd_pow10[n]-1) {
975
        return 1;
976
    }
977
978
    return 0;
979
}
980
981
/* All digits of the coefficient are nines */
982
static int
983
mpd_coeff_isallnine(const mpd_t *dec)
984
{
985
    if (mpd_word_isallnine(mpd_msword(dec))) {
inline
                           
mpd_msword should always be inlined (cost=always) 
mpd_coeff_isallnine
inline
                           
mpd_msword inlined into mpd_coeff_isallnine 
mpd_coeff_isallnine
inline
        
mpd_word_isallnine can be inlined into mpd_coeff_isallnine with cost=-14730 (threshold=250) 
mpd_coeff_isallnine
inline
        
mpd_word_isallnine inlined into mpd_coeff_isallnine 
mpd_coeff_isallnine
986
        if (_mpd_isallnine(dec->data, dec->len-1)) {
inline
            
_mpd_isallnine can be inlined into mpd_coeff_isallnine with cost=-15005 (threshold=325) 
mpd_coeff_isallnine
inline
            
_mpd_isallnine inlined into mpd_coeff_isallnine 
mpd_coeff_isallnine
987
            return 1;
988
        }
989
    }
990
991
    return 0;
992
}
993
994
/* Odd decimal: Undefined for non-integers! */
995
int
996
mpd_isodd(const mpd_t *dec)
997
{
998
    mpd_uint_t q, r;
999
    assert(mpd_isinteger(dec));
1000
    if (mpd_iszerocoeff(dec)) return 0;
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_isodd
inline
        
mpd_iszerocoeff inlined into mpd_isodd 
mpd_isodd
1001
    if (dec->exp < 0) {
licm
             
hosting getelementptr 
_mpd_qpow_mpd
licm
             
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_mpd
gvn
             
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
licm
             
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_int
gvn
             
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_int
licm
             
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpow
gvn
             
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
licm
             
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpowmod
gvn
             
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qpowmod
gvn
             
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qrem_near
1002
        _mpd_div_word(&q, &r, -dec->exp, MPD_RDIGITS);
inline
        
_mpd_div_word can be inlined into mpd_isodd with cost=-35 (threshold=487) 
mpd_isodd
inline
        
_mpd_div_word inlined into mpd_isodd 
mpd_isodd
1003
        q = dec->data[q] / mpd_pow10[r];
1004
        return mpd_isoddword(q);
inline
               
mpd_isoddword should always be inlined (cost=always) 
mpd_isodd
inline
               
mpd_isoddword inlined into mpd_isodd 
mpd_isodd
1005
    }
1006
    return dec->exp == 0 && mpd_isoddword(dec->data[0]);
inline
                            
mpd_isoddword should always be inlined (cost=always) 
mpd_isodd
inline
                            
mpd_isoddword inlined into mpd_isodd 
mpd_isodd
1007
}
1008
1009
/* Even: Undefined for non-integers! */
1010
int
1011
mpd_iseven(const mpd_t *dec)
1012
{
1013
    return !mpd_isodd(dec);
inline
            
mpd_isodd can be inlined into mpd_iseven with cost=95 (threshold=250) 
mpd_iseven
inline
            
mpd_isodd inlined into mpd_iseven 
mpd_iseven
1014
}
1015
1016
/******************************************************************************/
1017
/*                      Getting and setting decimals                          */
1018
/******************************************************************************/
1019
1020
/* Internal function: Set a static decimal from a triple, no error checking. */
1021
static void
1022
_ssettriple(mpd_t *result, uint8_t sign, mpd_uint_t a, mpd_ssize_t exp)
1023
{
1024
    mpd_set_flags(result, sign);
inline
    
mpd_set_flags should always be inlined (cost=always) 
_ssettriple
inline
    
mpd_set_flags inlined into _ssettriple 
_ssettriple
1025
    result->exp = exp;
1026
    _mpd_div_word(&result->data[1], &result->data[0], a, MPD_RADIX);
inline
    
_mpd_div_word can be inlined into _ssettriple with cost=-25 (threshold=487) 
_ssettriple
inline
    
_mpd_div_word inlined into _ssettriple 
_ssettriple
1027
    result->len = (result->data[1] == 0) ? 1 : 2;
gvn
                           
load of type i64* eliminated in favor of load 
_ssettriple
gvn
                   
load of type i64 eliminated in favor of zext 
_ssettriple
1028
    mpd_setdigits(result);
inline
    
mpd_setdigits can be inlined into _ssettriple with cost=295 (threshold=325) 
_ssettriple
inline
    
mpd_setdigits inlined into _ssettriple 
_ssettriple
1029
}
1030
1031
/* Internal function: Set a decimal from a triple, no error checking. */
1032
static void
1033
_settriple(mpd_t *result, uint8_t sign, mpd_uint_t a, mpd_ssize_t exp)
1034
{
1035
    mpd_minalloc(result);
inline
    
mpd_minalloc should always be inlined (cost=always) 
_settriple
inline
    
mpd_minalloc inlined into _settriple 
_settriple
1036
    mpd_set_flags(result, sign);
inline
    
mpd_set_flags should always be inlined (cost=always) 
_settriple
inline
    
mpd_set_flags inlined into _settriple 
_settriple
1037
    result->exp = exp;
1038
    _mpd_div_word(&result->data[1], &result->data[0], a, MPD_RADIX);
inline
    
_mpd_div_word can be inlined into _settriple with cost=-25 (threshold=487) 
_settriple
inline
    
_mpd_div_word inlined into _settriple 
_settriple
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
mpd_qcompare
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
mpd_qcompare_signal
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
mpd_compare_total
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
mpd_compare_total_mag
gvn
                           
load of type i64* eliminated in favor of phi 
_mpd_cap
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
mpd_qdivint
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
_mpd_qexp_check_one
gvn
                           
load of type i64* not eliminated in favor of store because it is clobbered by call 
_mpd_qexp
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
mpd_qexp
gvn
                           
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_qln
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
mpd_qln
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
mpd_qpow
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                           
load of type i64* eliminated in favor of phi 
mpd_qreduce
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
mpd_qinvroot
1039
    result->len = (result->data[1] == 0) ? 1 : 2;
gvn
                           
load of type i64* eliminated in favor of load 
_settriple
gvn
                   
load of type i64 eliminated in favor of zext 
_settriple
1040
    mpd_setdigits(result);
inline
    
Not inlining. Cost of inlining mpd_setdigits increases the cost of inlining _settriple in other contexts 
_settriple
inline
    
mpd_setdigits will not be inlined into _settriple 
_settriple
inline
    
mpd_setdigits can be inlined into mpd_qset_uint with cost=295 (threshold=325) 
mpd_qset_uint
inline
    
mpd_setdigits inlined into mpd_qset_uint 
mpd_qset_uint
inline
    
mpd_setdigits can be inlined into mpd_qcompare with cost=295 (threshold=325) 
mpd_qcompare
inline
    
mpd_setdigits inlined into mpd_qcompare 
mpd_qcompare
inline
    
mpd_setdigits can be inlined into mpd_qcompare_signal with cost=295 (threshold=325) 
mpd_qcompare_signal
inline
    
mpd_setdigits inlined into mpd_qcompare_signal 
mpd_qcompare_signal
inline
    
mpd_setdigits can be inlined into mpd_compare_total with cost=295 (threshold=325) 
mpd_compare_total
inline
    
mpd_setdigits inlined into mpd_compare_total 
mpd_compare_total
inline
    
mpd_setdigits can be inlined into mpd_compare_total_mag with cost=295 (threshold=325) 
mpd_compare_total_mag
inline
    
mpd_setdigits inlined into mpd_compare_total_mag 
mpd_compare_total_mag
inline
    
mpd_setdigits can be inlined into _mpd_cap with cost=295 (threshold=325) 
_mpd_cap
inline
    
mpd_setdigits inlined into _mpd_cap 
_mpd_cap
inline
    
mpd_setdigits can be inlined into _mpd_qdiv_inf with cost=295 (threshold=325) 
_mpd_qdiv_inf
inline
    
mpd_setdigits inlined into _mpd_qdiv_inf 
_mpd_qdiv_inf
inline
    
mpd_setdigits can be inlined into _mpd_qround_to_integral with cost=295 (threshold=325) 
_mpd_qround_to_integral
inline
    
mpd_setdigits inlined into _mpd_qround_to_integral 
_mpd_qround_to_integral
inline
    
mpd_setdigits can be inlined into _mpd_qdiv with cost=295 (threshold=325) 
_mpd_qdiv
inline
    
mpd_setdigits inlined into _mpd_qdiv 
_mpd_qdiv
inline
    
mpd_setdigits can be inlined into _mpd_qdivmod with cost=295 (threshold=325) 
_mpd_qdivmod
inline
    
mpd_setdigits inlined into _mpd_qdivmod 
_mpd_qdivmod
inline
    
mpd_setdigits can be inlined into mpd_qdivmod with cost=295 (threshold=325) 
mpd_qdivmod
inline
    
mpd_setdigits inlined into mpd_qdivmod 
mpd_qdivmod
inline
    
mpd_setdigits can be inlined into mpd_qdivint with cost=295 (threshold=325) 
mpd_qdivint
inline
    
mpd_setdigits inlined into mpd_qdivint 
mpd_qdivint
inline
    
mpd_setdigits can be inlined into _mpd_qexp_check_one with cost=295 (threshold=325) 
_mpd_qexp_check_one
inline
    
mpd_setdigits inlined into _mpd_qexp_check_one 
_mpd_qexp_check_one
inline
    
mpd_setdigits can be inlined into _mpd_qpow_uint with cost=295 (threshold=325) 
_mpd_qpow_uint
inline
    
mpd_setdigits inlined into _mpd_qpow_uint 
_mpd_qpow_uint
inline
    
mpd_setdigits can be inlined into _mpd_qexp with cost=295 (threshold=325) 
_mpd_qexp
inline
    
mpd_setdigits inlined into _mpd_qexp 
_mpd_qexp
inline
    
mpd_setdigits can be inlined into mpd_qexp with cost=295 (threshold=325) 
mpd_qexp
inline
    
mpd_setdigits inlined into mpd_qexp 
mpd_qexp
inline
    
mpd_setdigits can be inlined into _mpd_qln with cost=295 (threshold=325) 
_mpd_qln
inline
    
mpd_setdigits inlined into _mpd_qln 
_mpd_qln
inline
    
mpd_setdigits can be inlined into mpd_qln with cost=295 (threshold=325) 
mpd_qln
inline
    
mpd_setdigits inlined into mpd_qln 
mpd_qln
inline
    
mpd_setdigits can be inlined into mpd_qlog10 with cost=295 (threshold=325) 
mpd_qlog10
inline
    
mpd_setdigits inlined into mpd_qlog10 
mpd_qlog10
inline
    
mpd_setdigits can be inlined into _qcheck_pow_one with cost=295 (threshold=325) 
_qcheck_pow_one
inline
    
mpd_setdigits inlined into _qcheck_pow_one 
_qcheck_pow_one
inline
    
mpd_setdigits can be inlined into _qcheck_pow_bounds with cost=295 (threshold=325) 
_qcheck_pow_bounds
inline
    
mpd_setdigits inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
inline
    
mpd_setdigits can be inlined into _mpd_qpow_int with cost=295 (threshold=325) 
_mpd_qpow_int
inline
    
mpd_setdigits inlined into _mpd_qpow_int 
_mpd_qpow_int
inline
    
mpd_setdigits can be inlined into mpd_qpow with cost=295 (threshold=325) 
mpd_qpow
inline
    
mpd_setdigits inlined into mpd_qpow 
mpd_qpow
inline
    
mpd_setdigits can be inlined into _mpd_qrescale with cost=295 (threshold=325) 
_mpd_qrescale
inline
    
mpd_setdigits inlined into _mpd_qrescale 
_mpd_qrescale
inline
    
mpd_setdigits can be inlined into mpd_qpowmod with cost=295 (threshold=325) 
mpd_qpowmod
inline
    
mpd_setdigits inlined into mpd_qpowmod 
mpd_qpowmod
inline
    
mpd_setdigits can be inlined into mpd_qquantize with cost=295 (threshold=325) 
mpd_qquantize
inline
    
mpd_setdigits inlined into mpd_qquantize 
mpd_qquantize
inline
    
mpd_setdigits can be inlined into mpd_qreduce with cost=295 (threshold=325) 
mpd_qreduce
inline
    
mpd_setdigits inlined into mpd_qreduce 
mpd_qreduce
inline
    
mpd_setdigits can be inlined into mpd_qinvroot with cost=295 (threshold=325) 
mpd_qinvroot
inline
    
mpd_setdigits inlined into mpd_qinvroot 
mpd_qinvroot
inline
    
mpd_setdigits can be inlined into mpd_qsqrt with cost=295 (threshold=325) 
mpd_qsqrt
inline
    
mpd_setdigits inlined into mpd_qsqrt 
mpd_qsqrt
1041
}
1042
1043
/* Set a special number from a triple */
1044
void
1045
mpd_setspecial(mpd_t *result, uint8_t sign, uint8_t type)
1046
{
1047
    mpd_minalloc(result);
inline
    
mpd_minalloc should always be inlined (cost=always) 
mpd_setspecial
inline
    
mpd_minalloc inlined into mpd_setspecial 
mpd_setspecial
1048
    result->flags &= ~(MPD_NEG|MPD_SPECIAL);
gvn
                  
load of type i8 not eliminated in favor of load because it is clobbered by call 
mpd_setspecial
gvn
                  
load eliminated by PRE 
mpd_setspecial
1049
    result->flags |= (sign|type);
1050
    result->exp = result->digits = result->len = 0;
1051
}
1052
1053
/* Set result of NaN with an error status */
1054
void
1055
mpd_seterror(mpd_t *result, uint32_t flags, uint32_t *status)
1056
{
1057
    mpd_minalloc(result);
inline
    
mpd_minalloc should always be inlined (cost=always) 
mpd_seterror
inline
    
mpd_minalloc inlined into mpd_seterror 
mpd_seterror
1058
    mpd_set_qnan(result);
inline
    
mpd_set_qnan should always be inlined (cost=always) 
mpd_seterror
inline
    
mpd_set_qnan inlined into mpd_seterror 
mpd_seterror
1059
    mpd_set_positive(result);
inline
    
mpd_set_positive should always be inlined (cost=always) 
mpd_seterror
inline
    
mpd_set_positive inlined into mpd_seterror 
mpd_seterror
1060
    result->exp = result->digits = result->len = 0;
licm
            
hosting getelementptr 
_mpd_base_ndivmod
licm
            
hosting getelementptr 
mpd_qsqrt
1061
    *status |= flags;
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_seterror
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qand
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qinvert
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qor
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qshiftn
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qshift
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qxor
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qadd
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qsub
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qdiv_inf
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qmul_inf
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qmul
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qmul_exact
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qsub_exact
licm
            
hosting bitcast 
_mpd_qreciprocal
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qreciprocal
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qreciprocal
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qadd_exact
licm
            
hosting bitcast 
_mpd_base_ndivmod
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
            
load eliminated by PRE 
_mpd_base_ndivmod
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qdiv
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qdivint
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qexp
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qexp
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qln10
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qln
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qln
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qlog10
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qpow
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qrescale
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qrescale
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qrem
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_apply_round_fit
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qrescale_fmt
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qinvroot
licm
            
hosting bitcast 
mpd_qsqrt
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qsqrt
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qsqrt
gvn
            
load eliminated by PRE 
mpd_qsqrt
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qimport_u16
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qimport_u32
1062
}
1063
1064
/* quietly set a static decimal from an mpd_ssize_t */
1065
void
1066
mpd_qsset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx,
1067
                uint32_t *status)
1068
{
1069
    mpd_uint_t u;
1070
    uint8_t sign = MPD_POS;
1071
1072
    if (a < 0) {
1073
        if (a == MPD_SSIZE_MIN) {
1074
            u = (mpd_uint_t)MPD_SSIZE_MAX +
1075
                (-(MPD_SSIZE_MIN+MPD_SSIZE_MAX));
1076
        }
1077
        else {
1078
            u = -a;
1079
        }
1080
        sign = MPD_NEG;
1081
    }
1082
    else {
1083
        u = a;
1084
    }
1085
    _ssettriple(result, sign, u, 0);
inline
    
_ssettriple too costly to inline (cost=335, threshold=250) 
mpd_qsset_ssize
inline
    
_ssettriple will not be inlined into mpd_qsset_ssize 
mpd_qsset_ssize
inline
    
_ssettriple too costly to inline (cost=335, threshold=250) 
mpd_qsset_i32
inline
    
_ssettriple will not be inlined into mpd_qsset_i32 
mpd_qsset_i32
inline
    
_ssettriple too costly to inline (cost=335, threshold=250) 
mpd_qsset_i64
inline
    
_ssettriple will not be inlined into mpd_qsset_i64 
mpd_qsset_i64
inline
    
_ssettriple too costly to inline (cost=335, threshold=250) 
mpd_qset_ssize
inline
    
_ssettriple will not be inlined into mpd_qset_ssize 
mpd_qset_ssize
inline
    
_ssettriple too costly to inline (cost=335, threshold=250) 
mpd_qset_i32
inline
    
_ssettriple will not be inlined into mpd_qset_i32 
mpd_qset_i32
inline
    
_ssettriple too costly to inline (cost=335, threshold=250) 
mpd_qset_i64
inline
    
_ssettriple will not be inlined into mpd_qset_i64 
mpd_qset_i64
inline
    
_ssettriple too costly to inline (cost=335, threshold=250) 
mpd_qlogb
inline
    
_ssettriple will not be inlined into mpd_qlogb 
mpd_qlogb
inline
    
_ssettriple too costly to inline (cost=305, threshold=250) 
mpd_qadd_ssize
inline
    
_ssettriple will not be inlined into mpd_qadd_ssize 
mpd_qadd_ssize
inline
    
_ssettriple too costly to inline (cost=305, threshold=250) 
mpd_qsub_ssize
inline
    
_ssettriple will not be inlined into mpd_qsub_ssize 
mpd_qsub_ssize
inline
    
_ssettriple too costly to inline (cost=305, threshold=250) 
mpd_qdiv_ssize
inline
    
_ssettriple will not be inlined into mpd_qdiv_ssize 
mpd_qdiv_ssize
inline
    
_ssettriple too costly to inline (cost=305, threshold=250) 
mpd_qmul_ssize
inline
    
_ssettriple will not be inlined into mpd_qmul_ssize 
mpd_qmul_ssize
1086
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qsset_ssize
inline
    
mpd_qfinalize will not be inlined into mpd_qsset_ssize 
mpd_qsset_ssize
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qsset_i32
inline
    
mpd_qfinalize will not be inlined into mpd_qsset_i32 
mpd_qsset_i32
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qsset_i64
inline
    
mpd_qfinalize will not be inlined into mpd_qsset_i64 
mpd_qsset_i64
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qset_ssize
inline
    
mpd_qfinalize will not be inlined into mpd_qset_ssize 
mpd_qset_ssize
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qset_i32
inline
    
mpd_qfinalize will not be inlined into mpd_qset_i32 
mpd_qset_i32
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qset_i64
inline
    
mpd_qfinalize will not be inlined into mpd_qset_i64 
mpd_qset_i64
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qlogb
inline
    
mpd_qfinalize will not be inlined into mpd_qlogb 
mpd_qlogb
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qadd_ssize
inline
    
mpd_qfinalize will not be inlined into mpd_qadd_ssize 
mpd_qadd_ssize
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qsub_ssize
inline
    
mpd_qfinalize will not be inlined into mpd_qsub_ssize 
mpd_qsub_ssize
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qdiv_ssize
inline
    
mpd_qfinalize will not be inlined into mpd_qdiv_ssize 
mpd_qdiv_ssize
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qmul_ssize
inline
    
mpd_qfinalize will not be inlined into mpd_qmul_ssize 
mpd_qmul_ssize
1087
}
1088
1089
/* quietly set a static decimal from an mpd_uint_t */
void
mpd_qsset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx,
               uint32_t *status)
{
    /* Coefficient a, positive sign, exponent 0. */
    _ssettriple(result, MPD_POS, a, 0);
    /* Round to the context's precision; may set status flags. */
    mpd_qfinalize(result, ctx, status);
}
1097
1098
/* quietly set a static decimal from an int32_t */
void
mpd_qsset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx,
              uint32_t *status)
{
    /* int32_t always fits in mpd_ssize_t. */
    mpd_qsset_ssize(result, a, ctx, status);
}
1105
1106
/* quietly set a static decimal from a uint32_t */
void
mpd_qsset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx,
              uint32_t *status)
{
    /* uint32_t always fits in mpd_uint_t. */
    mpd_qsset_uint(result, a, ctx, status);
}
1113
1114
#ifdef CONFIG_64
1115
/* quietly set a static decimal from an int64_t */
void
mpd_qsset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
              uint32_t *status)
{
    /* CONFIG_64: mpd_ssize_t is 64 bits, so int64_t fits directly. */
    mpd_qsset_ssize(result, a, ctx, status);
}
1122
1123
/* quietly set a static decimal from a uint64_t */
void
mpd_qsset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
              uint32_t *status)
{
    /* CONFIG_64: mpd_uint_t is 64 bits, so uint64_t fits directly. */
    mpd_qsset_uint(result, a, ctx, status);
}
1130
#endif
1131
1132
/* quietly set a decimal from an mpd_ssize_t */
void
mpd_qset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx,
               uint32_t *status)
{
    /* Shrink a dynamic decimal to its minimum allocation first. */
    mpd_minalloc(result);
    mpd_qsset_ssize(result, a, ctx, status);
}
1140
1141
/* quietly set a decimal from an mpd_uint_t */
void
mpd_qset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx,
              uint32_t *status)
{
    /* _settriple (unlike _ssettriple) calls mpd_minalloc itself. */
    _settriple(result, MPD_POS, a, 0);
    /* Round to the context's precision; may set status flags. */
    mpd_qfinalize(result, ctx, status);
}
1149
1150
/* quietly set a decimal from an int32_t */
void
mpd_qset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx,
             uint32_t *status)
{
    /* int32_t always fits in mpd_ssize_t. */
    mpd_qset_ssize(result, a, ctx, status);
}
1157
1158
/* quietly set a decimal from a uint32_t */
void
mpd_qset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx,
             uint32_t *status)
{
    /* uint32_t always fits in mpd_uint_t. */
    mpd_qset_uint(result, a, ctx, status);
}
1165
1166
#if defined(CONFIG_32) && !defined(LEGACY_COMPILER)
1167
/* set a decimal from a uint64_t */
1168
static void
1169
_c32setu64(mpd_t *result, uint64_t u, uint8_t sign, uint32_t *status)
1170
{
1171
    mpd_uint_t w[3];
1172
    uint64_t q;
1173
    int i, len;
1174
1175
    len = 0;
1176
    do {
1177
        q = u / MPD_RADIX;
1178
        w[len] = (mpd_uint_t)(u - q * MPD_RADIX);
1179
        u = q; len++;
1180
    } while (u != 0);
1181
1182
    if (!mpd_qresize(result, len, status)) {
1183
        return;
1184
    }
1185
    for (i = 0; i < len; i++) {
1186
        result->data[i] = w[i];
1187
    }
1188
1189
    mpd_set_sign(result, sign);
1190
    result->exp = 0;
1191
    result->len = len;
1192
    mpd_setdigits(result);
1193
}
1194
1195
static void
1196
_c32_qset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
1197
              uint32_t *status)
1198
{
1199
    _c32setu64(result, a, MPD_POS, status);
1200
    mpd_qfinalize(result, ctx, status);
1201
}
1202
1203
/* set a decimal from an int64_t */
1204
static void
1205
_c32_qset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
1206
              uint32_t *status)
1207
{
1208
    uint64_t u;
1209
    uint8_t sign = MPD_POS;
1210
1211
    if (a < 0) {
1212
        if (a == INT64_MIN) {
1213
            u = (uint64_t)INT64_MAX + (-(INT64_MIN+INT64_MAX));
1214
        }
1215
        else {
1216
            u = -a;
1217
        }
1218
        sign = MPD_NEG;
1219
    }
1220
    else {
1221
        u = a;
1222
    }
1223
    _c32setu64(result, u, sign, status);
1224
    mpd_qfinalize(result, ctx, status);
1225
}
1226
#endif /* CONFIG_32 && !LEGACY_COMPILER */
1227
1228
#ifndef LEGACY_COMPILER
1229
/* quietly set a decimal from an int64_t */
1230
void
1231
mpd_qset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
1232
             uint32_t *status)
1233
{
1234
#ifdef CONFIG_64
1235
    mpd_qset_ssize(result, a, ctx, status);
inline
    
mpd_qset_ssize can be inlined into mpd_qset_i64 with cost=200 (threshold=250) 
mpd_qset_i64
inline
    
mpd_qset_ssize inlined into mpd_qset_i64 
mpd_qset_i64
1236
#else
1237
    _c32_qset_i64(result, a, ctx, status);
1238
#endif
1239
}
1240
1241
/* quietly set a decimal from a uint64_t */
1242
void
1243
mpd_qset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
1244
             uint32_t *status)
1245
{
1246
#ifdef CONFIG_64
1247
    mpd_qset_uint(result, a, ctx, status);
inline
    
mpd_qset_uint too costly to inline (cost=510, threshold=250) 
mpd_qset_u64
inline
    
mpd_qset_uint will not be inlined into mpd_qset_u64 
mpd_qset_u64
1248
#else
1249
    _c32_qset_u64(result, a, ctx, status);
1250
#endif
1251
}
1252
#endif /* !LEGACY_COMPILER */
1253
1254
1255
/*
1256
 * Quietly get an mpd_uint_t from a decimal. Assumes
1257
 * MPD_UINT_DIGITS == MPD_RDIGITS+1, which is true for
1258
 * 32 and 64 bit machines.
1259
 *
1260
 * If the operation is impossible, MPD_Invalid_operation is set.
1261
 */
1262
static mpd_uint_t
1263
_mpd_qget_uint(int use_sign, const mpd_t *a, uint32_t *status)
1264
{
1265
    mpd_t tmp;
1266
    mpd_uint_t tmp_data[2];
1267
    mpd_uint_t lo, hi;
1268
1269
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
_mpd_qget_uint
inline
        
mpd_isspecial inlined into _mpd_qget_uint 
_mpd_qget_uint
1270
        *status |= MPD_Invalid_operation;
1271
        return MPD_UINT_MAX;
1272
    }
1273
    if (mpd_iszero(a)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
_mpd_qget_uint
inline
        
mpd_iszero inlined into _mpd_qget_uint 
_mpd_qget_uint
1274
        return 0;
1275
    }
1276
    if (use_sign && mpd_isnegative(a)) {
inline
                    
mpd_isnegative should always be inlined (cost=always) 
_mpd_qget_uint
inline
                    
mpd_isnegative inlined into _mpd_qget_uint 
_mpd_qget_uint
1277
        *status |= MPD_Invalid_operation;
1278
        return MPD_UINT_MAX;
1279
    }
1280
1281
    if (a->digits+a->exp > MPD_RDIGITS+1) {
1282
        *status |= MPD_Invalid_operation;
1283
        return MPD_UINT_MAX;
1284
    }
1285
1286
    if (a->exp < 0) {
1287
        if (!_mpd_isint(a)) {
inline
             
_mpd_isint can be inlined into _mpd_qget_uint with cost=110 (threshold=250) 
_mpd_qget_uint
inline
             
_mpd_isint inlined into _mpd_qget_uint 
_mpd_qget_uint
1288
            *status |= MPD_Invalid_operation;
1289
            return MPD_UINT_MAX;
1290
        }
1291
        /* At this point a->digits+a->exp <= MPD_RDIGITS+1,
1292
         * so the shift fits. */
1293
        tmp.data = tmp_data;
1294
        tmp.flags = MPD_STATIC|MPD_STATIC_DATA;
1295
        tmp.alloc = 2;
1296
        mpd_qsshiftr(&tmp, a, -a->exp);
inline
        
mpd_qsshiftr can be inlined into _mpd_qget_uint with cost=-14560 (threshold=250) 
_mpd_qget_uint
inline
        
mpd_qsshiftr inlined into _mpd_qget_uint 
_mpd_qget_uint
gvn
                                  
load of type i64 eliminated in favor of load 
_mpd_qget_uint
1297
        tmp.exp = 0;
1298
        a = &tmp;
1299
    }
1300
1301
    _mpd_get_msdigits(&hi, &lo, a, MPD_RDIGITS+1);
inline
    
_mpd_get_msdigits can be inlined into _mpd_qget_uint with cost=140 (threshold=325) 
_mpd_qget_uint
inline
    
_mpd_get_msdigits inlined into _mpd_qget_uint 
_mpd_qget_uint
1302
    if (hi) {
1303
        *status |= MPD_Invalid_operation;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
_mpd_qget_uint
1304
        return MPD_UINT_MAX;
1305
    }
1306
1307
    if (a->exp > 0) {
gvn
           
load of type i64 eliminated in favor of phi 
_mpd_qget_uint
1308
        _mpd_mul_words(&hi, &lo, lo, mpd_pow10[a->exp]);
inline
        
_mpd_mul_words can be inlined into _mpd_qget_uint with cost=-25 (threshold=487) 
_mpd_qget_uint
inline
        
_mpd_mul_words inlined into _mpd_qget_uint 
_mpd_qget_uint
1309
        if (hi) {
1310
            *status |= MPD_Invalid_operation;
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
_mpd_qget_uint
1311
            return MPD_UINT_MAX;
1312
        }
1313
    }
1314
1315
    return lo;
1316
}
1317
1318
/*
1319
 * Sets Invalid_operation for:
1320
 *   - specials
1321
 *   - negative numbers (except negative zero)
1322
 *   - non-integers
1323
 *   - overflow
1324
 */
1325
mpd_uint_t
1326
mpd_qget_uint(const mpd_t *a, uint32_t *status)
1327
{
1328
    return _mpd_qget_uint(1, a, status);
inline
           
_mpd_qget_uint too costly to inline (cost=630, threshold=625) 
mpd_qget_uint
inline
           
_mpd_qget_uint will not be inlined into mpd_qget_uint 
mpd_qget_uint
inline
           
_mpd_qget_uint too costly to inline (cost=630, threshold=625) 
mpd_qget_u64
inline
           
_mpd_qget_uint will not be inlined into mpd_qget_u64 
mpd_qget_u64
inline
           
_mpd_qget_uint too costly to inline (cost=630, threshold=625) 
mpd_qget_u32
inline
           
_mpd_qget_uint will not be inlined into mpd_qget_u32 
mpd_qget_u32
1329
}
1330
1331
/* Same as above, but gets the absolute value, i.e. the sign is ignored. */
1332
mpd_uint_t
1333
mpd_qabs_uint(const mpd_t *a, uint32_t *status)
1334
{
1335
    return _mpd_qget_uint(0, a, status);
inline
           
_mpd_qget_uint too costly to inline (cost=655, threshold=625) 
mpd_qabs_uint
inline
           
_mpd_qget_uint will not be inlined into mpd_qabs_uint 
mpd_qabs_uint
inline
           
_mpd_qget_uint too costly to inline (cost=655, threshold=625) 
mpd_qget_ssize
inline
           
_mpd_qget_uint will not be inlined into mpd_qget_ssize 
mpd_qget_ssize
inline
           
_mpd_qget_uint too costly to inline (cost=655, threshold=625) 
mpd_qget_i64
inline
           
_mpd_qget_uint will not be inlined into mpd_qget_i64 
mpd_qget_i64
inline
           
_mpd_qget_uint too costly to inline (cost=655, threshold=625) 
mpd_qget_i32
inline
           
_mpd_qget_uint will not be inlined into mpd_qget_i32 
mpd_qget_i32
inline
           
_mpd_qget_uint too costly to inline (cost=660, threshold=625) 
mpd_qrotate
inline
           
_mpd_qget_uint will not be inlined into mpd_qrotate 
mpd_qrotate
inline
           
_mpd_qget_uint too costly to inline (cost=660, threshold=625) 
mpd_qscaleb
inline
           
_mpd_qget_uint will not be inlined into mpd_qscaleb 
mpd_qscaleb
inline
           
_mpd_qget_uint too costly to inline (cost=660, threshold=625) 
mpd_qshift
inline
           
_mpd_qget_uint will not be inlined into mpd_qshift 
mpd_qshift
inline
           
_mpd_qget_uint too costly to inline (cost=660, threshold=625) 
_qcheck_pow_one
inline
           
_mpd_qget_uint will not be inlined into _qcheck_pow_one 
_qcheck_pow_one
inline
           
_mpd_qget_uint too costly to inline (cost=660, threshold=625) 
_mpd_qpow_int
inline
           
_mpd_qget_uint will not be inlined into _mpd_qpow_int 
_mpd_qpow_int
inline
           
_mpd_qget_uint too costly to inline (cost=660, threshold=625) 
mpd_qpow
inline
           
_mpd_qget_uint will not be inlined into mpd_qpow 
mpd_qpow
1336
}
1337
1338
/* quietly get an mpd_ssize_t from a decimal */
1339
mpd_ssize_t
1340
mpd_qget_ssize(const mpd_t *a, uint32_t *status)
1341
{
1342
    mpd_uint_t u;
1343
    int isneg;
1344
1345
    u = mpd_qabs_uint(a, status);
inline
        
mpd_qabs_uint can be inlined into mpd_qget_ssize with cost=5 (threshold=375) 
mpd_qget_ssize
inline
        
mpd_qabs_uint inlined into mpd_qget_ssize 
mpd_qget_ssize
1346
    if (*status&MPD_Invalid_operation) {
gvn
        
load of type i32 not eliminated because it is clobbered by call 
mpd_qget_ssize
gvn
        
load of type i32 not eliminated because it is clobbered by call 
mpd_qget_i64
gvn
        
load of type i32 not eliminated because it is clobbered by call 
mpd_qget_i32
gvn
        
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qrotate
gvn
        
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qshift
gvn
        
load of type i32 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
        
load of type i32 not eliminated because it is clobbered by call 
mpd_qpow
1347
        return MPD_SSIZE_MAX;
1348
    }
1349
1350
    isneg = mpd_isnegative(a);
inline
            
mpd_isnegative should always be inlined (cost=always) 
mpd_qget_ssize
inline
            
mpd_isnegative inlined into mpd_qget_ssize 
mpd_qget_ssize
1351
    if (u <= MPD_SSIZE_MAX) {
1352
        return isneg ? -((mpd_ssize_t)u) : (mpd_ssize_t)u;
1353
    }
1354
    else if (isneg && u+(MPD_SSIZE_MIN+MPD_SSIZE_MAX) == MPD_SSIZE_MAX) {
1355
        return MPD_SSIZE_MIN;
1356
    }
1357
1358
    *status |= MPD_Invalid_operation;
1359
    return MPD_SSIZE_MAX;
1360
}
1361
1362
#if defined(CONFIG_32) && !defined(LEGACY_COMPILER)
1363
/*
1364
 * Quietly get a uint64_t from a decimal. If the operation is impossible,
1365
 * MPD_Invalid_operation is set.
1366
 */
1367
static uint64_t
1368
_c32_qget_u64(int use_sign, const mpd_t *a, uint32_t *status)
1369
{
1370
    MPD_NEW_STATIC(tmp,0,0,20,3);
1371
    mpd_context_t maxcontext;
1372
    uint64_t ret;
1373
1374
    tmp_data[0] = 709551615;
1375
    tmp_data[1] = 446744073;
1376
    tmp_data[2] = 18;
1377
1378
    if (mpd_isspecial(a)) {
1379
        *status |= MPD_Invalid_operation;
1380
        return UINT64_MAX;
1381
    }
1382
    if (mpd_iszero(a)) {
1383
        return 0;
1384
    }
1385
    if (use_sign && mpd_isnegative(a)) {
1386
        *status |= MPD_Invalid_operation;
1387
        return UINT64_MAX;
1388
    }
1389
    if (!_mpd_isint(a)) {
1390
        *status |= MPD_Invalid_operation;
1391
        return UINT64_MAX;
1392
    }
1393
1394
    if (_mpd_cmp_abs(a, &tmp) > 0) {
1395
        *status |= MPD_Invalid_operation;
1396
        return UINT64_MAX;
1397
    }
1398
1399
    mpd_maxcontext(&maxcontext);
1400
    mpd_qrescale(&tmp, a, 0, &maxcontext, &maxcontext.status);
1401
    maxcontext.status &= ~MPD_Rounded;
1402
    if (maxcontext.status != 0) {
1403
        *status |= (maxcontext.status|MPD_Invalid_operation); /* GCOV_NOT_REACHED */
1404
        return UINT64_MAX; /* GCOV_NOT_REACHED */
1405
    }
1406
1407
    ret = 0;
1408
    switch (tmp.len) {
1409
    case 3:
1410
        ret += (uint64_t)tmp_data[2] * 1000000000000000000ULL;
1411
    case 2:
1412
        ret += (uint64_t)tmp_data[1] * 1000000000ULL;
1413
    case 1:
1414
        ret += tmp_data[0];
1415
        break;
1416
    default:
1417
        abort(); /* GCOV_NOT_REACHED */
1418
    }
1419
1420
    return ret;
1421
}
1422
1423
static int64_t
1424
_c32_qget_i64(const mpd_t *a, uint32_t *status)
1425
{
1426
    uint64_t u;
1427
    int isneg;
1428
1429
    u = _c32_qget_u64(0, a, status);
1430
    if (*status&MPD_Invalid_operation) {
1431
        return INT64_MAX;
1432
    }
1433
1434
    isneg = mpd_isnegative(a);
1435
    if (u <= INT64_MAX) {
1436
        return isneg ? -((int64_t)u) : (int64_t)u;
1437
    }
1438
    else if (isneg && u+(INT64_MIN+INT64_MAX) == INT64_MAX) {
1439
        return INT64_MIN;
1440
    }
1441
1442
    *status |= MPD_Invalid_operation;
1443
    return INT64_MAX;
1444
}
1445
#endif /* CONFIG_32 && !LEGACY_COMPILER */
1446
1447
#ifdef CONFIG_64
1448
/* quietly get a uint64_t from a decimal */
1449
uint64_t
1450
mpd_qget_u64(const mpd_t *a, uint32_t *status)
1451
{
1452
    return mpd_qget_uint(a, status);
inline
           
mpd_qget_uint can be inlined into mpd_qget_u64 with cost=5 (threshold=375) 
mpd_qget_u64
inline
           
mpd_qget_uint inlined into mpd_qget_u64 
mpd_qget_u64
1453
}
1454
1455
/* quietly get an int64_t from a decimal */
1456
int64_t
1457
mpd_qget_i64(const mpd_t *a, uint32_t *status)
1458
{
1459
    return mpd_qget_ssize(a, status);
inline
           
mpd_qget_ssize can be inlined into mpd_qget_i64 with cost=85 (threshold=250) 
mpd_qget_i64
inline
           
mpd_qget_ssize inlined into mpd_qget_i64 
mpd_qget_i64
1460
}
1461
1462
/* quietly get a uint32_t from a decimal */
1463
uint32_t
1464
mpd_qget_u32(const mpd_t *a, uint32_t *status)
1465
{
1466
    uint64_t x = mpd_qget_uint(a, status);
inline
                 
mpd_qget_uint can be inlined into mpd_qget_u32 with cost=5 (threshold=375) 
mpd_qget_u32
inline
                 
mpd_qget_uint inlined into mpd_qget_u32 
mpd_qget_u32
1467
1468
    if (*status&MPD_Invalid_operation) {
gvn
        
load of type i32 not eliminated because it is clobbered by call 
mpd_qget_u32
1469
        return UINT32_MAX;
1470
    }
1471
    if (x > UINT32_MAX) {
1472
        *status |= MPD_Invalid_operation;
1473
        return UINT32_MAX;
1474
    }
1475
1476
    return (uint32_t)x;
1477
}
1478
1479
/* quietly get an int32_t from a decimal */
1480
int32_t
1481
mpd_qget_i32(const mpd_t *a, uint32_t *status)
1482
{
1483
    int64_t x = mpd_qget_ssize(a, status);
inline
                
mpd_qget_ssize can be inlined into mpd_qget_i32 with cost=85 (threshold=250) 
mpd_qget_i32
inline
                
mpd_qget_ssize inlined into mpd_qget_i32 
mpd_qget_i32
1484
1485
    if (*status&MPD_Invalid_operation) {
gvn
        
load of type i32 eliminated in favor of phi 
mpd_qget_i32
1486
        return INT32_MAX;
1487
    }
1488
    if (x < INT32_MIN || x > INT32_MAX) {
1489
        *status |= MPD_Invalid_operation;
1490
        return INT32_MAX;
1491
    }
1492
1493
    return (int32_t)x;
1494
}
1495
#else
1496
#ifndef LEGACY_COMPILER
1497
/* quietly get a uint64_t from a decimal */
1498
uint64_t
1499
mpd_qget_u64(const mpd_t *a, uint32_t *status)
1500
{
1501
    return _c32_qget_u64(1, a, status);
1502
}
1503
1504
/* quietly get an int64_t from a decimal */
1505
int64_t
1506
mpd_qget_i64(const mpd_t *a, uint32_t *status)
1507
{
1508
    return _c32_qget_i64(a, status);
1509
}
1510
#endif
1511
1512
/* quietly get a uint32_t from a decimal */
1513
uint32_t
1514
mpd_qget_u32(const mpd_t *a, uint32_t *status)
1515
{
1516
    return mpd_qget_uint(a, status);
1517
}
1518
1519
/* quietly get an int32_t from a decimal */
1520
int32_t
1521
mpd_qget_i32(const mpd_t *a, uint32_t *status)
1522
{
1523
    return mpd_qget_ssize(a, status);
1524
}
1525
#endif
1526
1527
1528
/******************************************************************************/
1529
/*         Filtering input of functions, finalizing output of functions       */
1530
/******************************************************************************/
1531
1532
/*
1533
 * Check if the operand is NaN, copy to result and return 1 if this is
1534
 * the case. Copying can fail since NaNs are allowed to have a payload that
1535
 * does not fit in MPD_MINALLOC.
1536
 */
1537
int
1538
mpd_qcheck_nan(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
1539
               uint32_t *status)
1540
{
1541
    if (mpd_isnan(a)) {
inline
        
mpd_isnan should always be inlined (cost=always) 
mpd_qcheck_nan
inline
        
mpd_isnan inlined into mpd_qcheck_nan 
mpd_qcheck_nan
1542
        *status |= mpd_issnan(a) ? MPD_Invalid_operation : 0;
inline
                   
mpd_issnan should always be inlined (cost=always) 
mpd_qcheck_nan
inline
                   
mpd_issnan inlined into mpd_qcheck_nan 
mpd_qcheck_nan
1543
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into mpd_qcheck_nan with cost=215 (threshold=250) 
mpd_qcheck_nan
inline
        
mpd_qcopy inlined into mpd_qcheck_nan 
mpd_qcheck_nan
1544
        mpd_set_qnan(result);
inline
        
mpd_set_qnan should always be inlined (cost=always) 
mpd_qcheck_nan
inline
        
mpd_set_qnan inlined into mpd_qcheck_nan 
mpd_qcheck_nan
1545
        _mpd_fix_nan(result, ctx);
inline
        
_mpd_fix_nan too costly to inline (cost=630, threshold=625) 
mpd_qcheck_nan
inline
        
_mpd_fix_nan will not be inlined into mpd_qcheck_nan 
mpd_qcheck_nan
1546
        return 1;
1547
    }
1548
    return 0;
1549
}
1550
1551
/*
1552
 * Check if either operand is NaN, copy to result and return 1 if this
1553
 * is the case. Copying can fail since NaNs are allowed to have a payload
1554
 * that does not fit in MPD_MINALLOC.
1555
 */
1556
int
1557
mpd_qcheck_nans(mpd_t *result, const mpd_t *a, const mpd_t *b,
1558
                const mpd_context_t *ctx, uint32_t *status)
1559
{
1560
    if ((a->flags|b->flags)&(MPD_NAN|MPD_SNAN)) {
1561
        const mpd_t *choice = b;
1562
        if (mpd_issnan(a)) {
inline
            
mpd_issnan should always be inlined (cost=always) 
mpd_qcheck_nans
inline
            
mpd_issnan inlined into mpd_qcheck_nans 
mpd_qcheck_nans
1563
            choice = a;
1564
            *status |= MPD_Invalid_operation;
1565
        }
1566
        else if (mpd_issnan(b)) {
inline
                 
mpd_issnan should always be inlined (cost=always) 
mpd_qcheck_nans
inline
                 
mpd_issnan inlined into mpd_qcheck_nans 
mpd_qcheck_nans
1567
            *status |= MPD_Invalid_operation;
1568
        }
1569
        else if (mpd_isqnan(a)) {
inline
                 
mpd_isqnan should always be inlined (cost=always) 
mpd_qcheck_nans
inline
                 
mpd_isqnan inlined into mpd_qcheck_nans 
mpd_qcheck_nans
1570
            choice = a;
1571
        }
1572
        mpd_qcopy(result, choice, status);
inline
        
mpd_qcopy can be inlined into mpd_qcheck_nans with cost=215 (threshold=250) 
mpd_qcheck_nans
inline
        
mpd_qcopy inlined into mpd_qcheck_nans 
mpd_qcheck_nans
1573
        mpd_set_qnan(result);
inline
        
mpd_set_qnan should always be inlined (cost=always) 
mpd_qcheck_nans
inline
        
mpd_set_qnan inlined into mpd_qcheck_nans 
mpd_qcheck_nans
1574
        _mpd_fix_nan(result, ctx);
inline
        
_mpd_fix_nan too costly to inline (cost=630, threshold=625) 
mpd_qcheck_nans
inline
        
_mpd_fix_nan will not be inlined into mpd_qcheck_nans 
mpd_qcheck_nans
1575
        return 1;
1576
    }
1577
    return 0;
1578
}
1579
1580
/*
1581
 * Check if one of the operands is NaN, copy to result and return 1 if this
1582
 * is the case. Copying can fail since NaNs are allowed to have a payload
1583
 * that does not fit in MPD_MINALLOC.
1584
 */
1585
static int
1586
mpd_qcheck_3nans(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c,
1587
                 const mpd_context_t *ctx, uint32_t *status)
1588
{
1589
    if ((a->flags|b->flags|c->flags)&(MPD_NAN|MPD_SNAN)) {
gvn
            
load of type i8 eliminated in favor of load 
mpd_qpowmod
gvn
                     
load eliminated by PRE 
mpd_qpowmod
1590
        const mpd_t *choice = c;
1591
        if (mpd_issnan(a)) {
inline
            
mpd_issnan should always be inlined (cost=always) 
mpd_qcheck_3nans
inline
            
mpd_issnan inlined into mpd_qcheck_3nans 
mpd_qcheck_3nans
1592
            choice = a;
1593
            *status |= MPD_Invalid_operation;
1594
        }
1595
        else if (mpd_issnan(b)) {
inline
                 
mpd_issnan should always be inlined (cost=always) 
mpd_qcheck_3nans
inline
                 
mpd_issnan inlined into mpd_qcheck_3nans 
mpd_qcheck_3nans
1596
            choice = b;
1597
            *status |= MPD_Invalid_operation;
1598
        }
1599
        else if (mpd_issnan(c)) {
inline
                 
mpd_issnan should always be inlined (cost=always) 
mpd_qcheck_3nans
inline
                 
mpd_issnan inlined into mpd_qcheck_3nans 
mpd_qcheck_3nans
1600
            *status |= MPD_Invalid_operation;
1601
        }
1602
        else if (mpd_isqnan(a)) {
inline
                 
mpd_isqnan should always be inlined (cost=always) 
mpd_qcheck_3nans
inline
                 
mpd_isqnan inlined into mpd_qcheck_3nans 
mpd_qcheck_3nans
1603
            choice = a;
1604
        }
1605
        else if (mpd_isqnan(b)) {
inline
                 
mpd_isqnan should always be inlined (cost=always) 
mpd_qcheck_3nans
inline
                 
mpd_isqnan inlined into mpd_qcheck_3nans 
mpd_qcheck_3nans
1606
            choice = b;
1607
        }
1608
        mpd_qcopy(result, choice, status);
inline
        
mpd_qcopy can be inlined into mpd_qcheck_3nans with cost=215 (threshold=250) 
mpd_qcheck_3nans
inline
        
mpd_qcopy inlined into mpd_qcheck_3nans 
mpd_qcheck_3nans
1609
        mpd_set_qnan(result);
inline
        
mpd_set_qnan should always be inlined (cost=always) 
mpd_qcheck_3nans
inline
        
mpd_set_qnan inlined into mpd_qcheck_3nans 
mpd_qcheck_3nans
1610
        _mpd_fix_nan(result, ctx);
inline
        
_mpd_fix_nan too costly to inline (cost=630, threshold=625) 
mpd_qcheck_3nans
inline
        
_mpd_fix_nan will not be inlined into mpd_qcheck_3nans 
mpd_qcheck_3nans
inline
        
_mpd_fix_nan too costly to inline (cost=630, threshold=625) 
mpd_qpowmod
inline
        
_mpd_fix_nan will not be inlined into mpd_qpowmod 
mpd_qpowmod
1611
        return 1;
1612
    }
1613
    return 0;
1614
}
1615
1616
/* Check if rounding digit 'rnd' leads to an increment. */
1617
static inline int
1618
_mpd_rnd_incr(const mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx)
1619
{
1620
    int ld;
1621
1622
    switch (ctx->round) {
1623
    case MPD_ROUND_DOWN: case MPD_ROUND_TRUNC:
1624
        return 0;
1625
    case MPD_ROUND_HALF_UP:
1626
        return (rnd >= 5);
1627
    case MPD_ROUND_HALF_EVEN:
1628
        return (rnd > 5) || ((rnd == 5) && mpd_isoddcoeff(dec));
inline
                                           
mpd_isoddcoeff should always be inlined (cost=always) 
_mpd_rnd_incr
inline
                                           
mpd_isoddcoeff inlined into _mpd_rnd_incr 
_mpd_rnd_incr
1629
    case MPD_ROUND_CEILING:
1630
        return !(rnd == 0 || mpd_isnegative(dec));
inline
                             
mpd_isnegative should always be inlined (cost=always) 
_mpd_rnd_incr
inline
                             
mpd_isnegative inlined into _mpd_rnd_incr 
_mpd_rnd_incr
1631
    case MPD_ROUND_FLOOR:
1632
        return !(rnd == 0 || mpd_ispositive(dec));
inline
                             
mpd_ispositive should always be inlined (cost=always) 
_mpd_rnd_incr
inline
                             
mpd_ispositive inlined into _mpd_rnd_incr 
_mpd_rnd_incr
1633
    case MPD_ROUND_HALF_DOWN:
1634
        return (rnd > 5);
1635
    case MPD_ROUND_UP:
1636
        return !(rnd == 0);
1637
    case MPD_ROUND_05UP:
1638
        ld = (int)mpd_lsd(dec->data[0]);
inline
                  
mpd_lsd should always be inlined (cost=always) 
_mpd_rnd_incr
inline
                  
mpd_lsd inlined into _mpd_rnd_incr 
_mpd_rnd_incr
gvn
                               
load of type i64* not eliminated because it is clobbered by call 
_mpd_check_round
gvn
                               
load of type i64* not eliminated because it is clobbered by call 
mpd_qfinalize
gvn
                               
load of type i64* not eliminated because it is clobbered by call 
mpd_qquantize
1639
        return (!(rnd == 0) && (ld == 0 || ld == 5));
1640
    default:
1641
        /* Without a valid context, further results will be undefined. */
1642
        return 0; /* GCOV_NOT_REACHED */
1643
    }
1644
}
1645
1646
/*
1647
 * Apply rounding to a decimal that has been right-shifted into a full
1648
 * precision decimal. If an increment leads to an overflow of the precision,
1649
 * adjust the coefficient and the exponent and check the new exponent for
1650
 * overflow.
1651
 */
1652
static inline void
1653
_mpd_apply_round(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
1654
                 uint32_t *status)
1655
{
1656
    if (_mpd_rnd_incr(dec, rnd, ctx)) {
inline
        
_mpd_rnd_incr can be inlined into _mpd_apply_round with cost=145 (threshold=325) 
_mpd_apply_round
inline
        
_mpd_rnd_incr inlined into _mpd_apply_round 
_mpd_apply_round
1657
        /* We have a number with exactly ctx->prec digits. The increment
1658
         * can only lead to an overflow if the decimal is all nines. In
1659
         * that case, the result is a power of ten with prec+1 digits.
1660
         *
1661
         * If the precision is a multiple of MPD_RDIGITS, this situation is
1662
         * detected by _mpd_baseincr returning a carry.
1663
         * If the precision is not a multiple of MPD_RDIGITS, we have to
1664
         * check if the result has one digit too many.
1665
         */
1666
        mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
inline
                           
_mpd_baseincr will not be inlined into _mpd_apply_round because its definition is unavailable 
_mpd_apply_round
gvn
                                              
load of type i64* not eliminated because it is clobbered by call 
_mpd_check_round
gvn
                                                         
load of type i64 not eliminated because it is clobbered by call 
_mpd_check_round
gvn
                                              
load of type i64* not eliminated because it is clobbered by call 
mpd_qfinalize
gvn
                                                         
load of type i64 not eliminated because it is clobbered by call 
mpd_qfinalize
1667
        if (carry) {
1668
            dec->data[dec->len-1] = mpd_pow10[MPD_RDIGITS-1];
gvn
                 
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round
gvn
                           
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round
gvn
                 
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_check_round
gvn
                           
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_check_round
gvn
                 
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qfinalize
gvn
                           
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qfinalize
1669
            dec->exp += 1;
gvn
                     
load of type i64 not eliminated because it is clobbered by store 
_mpd_apply_round
gvn
                     
load of type i64 not eliminated because it is clobbered by store 
_mpd_check_round
gvn
                     
load of type i64 not eliminated because it is clobbered by store 
mpd_qfinalize
1670
            _mpd_check_exp(dec, ctx, status);
inline
            
_mpd_check_exp too costly to inline (cost=815, threshold=812) 
_mpd_apply_round
inline
            
_mpd_check_exp will not be inlined into _mpd_apply_round 
_mpd_apply_round
inline
            
_mpd_check_exp too costly to inline (cost=815, threshold=812) 
_mpd_check_round
inline
            
_mpd_check_exp will not be inlined into _mpd_check_round 
_mpd_check_round
inline
            
_mpd_check_exp too costly to inline (cost=815, threshold=812) 
mpd_qfinalize
inline
            
_mpd_check_exp will not be inlined into mpd_qfinalize 
mpd_qfinalize
1671
            return;
1672
        }
1673
        mpd_setdigits(dec);
inline
        
mpd_setdigits can be inlined into _mpd_apply_round with cost=295 (threshold=325) 
_mpd_apply_round
inline
        
mpd_setdigits inlined into _mpd_apply_round 
_mpd_apply_round
1674
        if (dec->digits > ctx->prec) {
gvn
                               
load of type i64 not eliminated because it is clobbered by call 
_mpd_apply_round
gvn
                               
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_check_round
gvn
                               
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qfinalize
1675
            mpd_qshiftr_inplace(dec, 1);
inline
            
mpd_qshiftr_inplace too costly to inline (cost=470, threshold=250) 
_mpd_apply_round
inline
            
mpd_qshiftr_inplace will not be inlined into _mpd_apply_round 
_mpd_apply_round
inline
            
mpd_qshiftr_inplace too costly to inline (cost=470, threshold=250) 
_mpd_check_round
inline
            
mpd_qshiftr_inplace will not be inlined into _mpd_check_round 
_mpd_check_round
inline
            
mpd_qshiftr_inplace too costly to inline (cost=470, threshold=250) 
mpd_qfinalize
inline
            
mpd_qshiftr_inplace will not be inlined into mpd_qfinalize 
mpd_qfinalize
1676
            dec->exp += 1;
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
_mpd_apply_round
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
_mpd_check_round
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qfinalize
1677
            dec->digits = ctx->prec;
gvn
                               
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round
gvn
                               
load of type i64 not eliminated because it is clobbered by call 
_mpd_check_round
gvn
                               
load of type i64 not eliminated because it is clobbered by call 
mpd_qfinalize
1678
            _mpd_check_exp(dec, ctx, status);
inline
            
_mpd_check_exp too costly to inline (cost=815, threshold=812) 
_mpd_apply_round
inline
            
_mpd_check_exp will not be inlined into _mpd_apply_round 
_mpd_apply_round
inline
            
_mpd_check_exp too costly to inline (cost=815, threshold=812) 
_mpd_check_round
inline
            
_mpd_check_exp will not be inlined into _mpd_check_round 
_mpd_check_round
inline
            
_mpd_check_exp too costly to inline (cost=815, threshold=812) 
mpd_qfinalize
inline
            
_mpd_check_exp will not be inlined into mpd_qfinalize 
mpd_qfinalize
1679
        }
1680
    }
1681
}
1682
1683
/*
1684
 * Apply rounding to a decimal. Allow overflow of the precision.
1685
 */
1686
static inline void
1687
_mpd_apply_round_excess(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
1688
                        uint32_t *status)
1689
{
1690
    if (_mpd_rnd_incr(dec, rnd, ctx)) {
inline
        
_mpd_rnd_incr can be inlined into _mpd_apply_round_excess with cost=145 (threshold=325) 
_mpd_apply_round_excess
inline
        
_mpd_rnd_incr inlined into _mpd_apply_round_excess 
_mpd_apply_round_excess
1691
        mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
inline
                           
_mpd_baseincr will not be inlined into _mpd_apply_round_excess because its definition is unavailable 
_mpd_apply_round_excess
1692
        if (carry) {
1693
            if (!mpd_qresize(dec, dec->len+1, status)) {
inline
                 
mpd_qresize should always be inlined (cost=always) 
_mpd_apply_round_excess
inline
                 
mpd_qresize inlined into _mpd_apply_round_excess 
_mpd_apply_round_excess
gvn
                                       
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round_excess
1694
                return;
1695
            }
1696
            dec->data[dec->len] = 1;
gvn
                 
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round_excess
gvn
                           
load of type i64 not eliminated because it is clobbered by call 
_mpd_apply_round_excess
gvn
                           
load eliminated by PRE 
_mpd_apply_round_excess
1697
            dec->len += 1;
gvn
                     
load of type i64 not eliminated because it is clobbered by store 
_mpd_apply_round_excess
1698
        }
1699
        mpd_setdigits(dec);
inline
        
mpd_setdigits can be inlined into _mpd_apply_round_excess with cost=295 (threshold=325) 
_mpd_apply_round_excess
inline
        
mpd_setdigits inlined into _mpd_apply_round_excess 
_mpd_apply_round_excess
1700
    }
1701
}
1702
1703
/*
1704
 * Apply rounding to a decimal that has been right-shifted into a decimal
1705
 * with full precision or less. Return failure if an increment would
1706
 * overflow the precision.
1707
 */
1708
static inline int
1709
_mpd_apply_round_fit(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
1710
                     uint32_t *status)
1711
{
1712
    if (_mpd_rnd_incr(dec, rnd, ctx)) {
inline
        
_mpd_rnd_incr can be inlined into _mpd_apply_round_fit with cost=-14855 (threshold=325) 
_mpd_apply_round_fit
inline
        
_mpd_rnd_incr inlined into _mpd_apply_round_fit 
_mpd_apply_round_fit
1713
        mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
inline
                           
_mpd_baseincr will not be inlined into _mpd_apply_round_fit because its definition is unavailable 
_mpd_apply_round_fit
gvn
                                              
load of type i64* not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                                                         
load of type i64 not eliminated because it is clobbered by call 
mpd_qquantize
1714
        if (carry) {
1715
            if (!mpd_qresize(dec, dec->len+1, status)) {
inline
                 
mpd_qresize should always be inlined (cost=always) 
_mpd_apply_round_fit
inline
                 
mpd_qresize inlined into _mpd_apply_round_fit 
_mpd_apply_round_fit
gvn
                                       
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round_fit
gvn
                                       
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qquantize
1716
                return 0;
1717
            }
1718
            dec->data[dec->len] = 1;
gvn
                 
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_apply_round_fit
gvn
                           
load of type i64 not eliminated because it is clobbered by call 
_mpd_apply_round_fit
gvn
                           
load eliminated by PRE 
_mpd_apply_round_fit
gvn
                 
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qquantize
1719
            dec->len += 1;
gvn
                     
load of type i64 not eliminated because it is clobbered by store 
_mpd_apply_round_fit
gvn
                     
load of type i64 not eliminated because it is clobbered by store 
mpd_qquantize
1720
        }
1721
        mpd_setdigits(dec);
inline
        
mpd_setdigits can be inlined into _mpd_apply_round_fit with cost=295 (threshold=325) 
_mpd_apply_round_fit
inline
        
mpd_setdigits inlined into _mpd_apply_round_fit 
_mpd_apply_round_fit
1722
        if (dec->digits > ctx->prec) {
gvn
                               
load of type i64 not eliminated because it is clobbered by store 
_mpd_apply_round_fit
gvn
                               
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qquantize
1723
            mpd_seterror(dec, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into _mpd_apply_round_fit with cost=130 (threshold=250) 
_mpd_apply_round_fit
inline
            
mpd_seterror inlined into _mpd_apply_round_fit 
_mpd_apply_round_fit
1724
            return 0;
1725
        }
1726
    }
1727
    return 1;
1728
}
1729
1730
/* Check a normal number for overflow, underflow, clamping. If the operand
1731
   is modified, it will be zero, special or (sub)normal with a coefficient
1732
   that fits into the current context precision. */
1733
static inline void
1734
_mpd_check_exp(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
1735
{
1736
    mpd_ssize_t adjexp, etiny, shift;
1737
    int rnd;
1738
1739
    adjexp = mpd_adjexp(dec);
inline
             
mpd_adjexp should always be inlined (cost=always) 
_mpd_check_exp
inline
             
mpd_adjexp inlined into _mpd_check_exp 
_mpd_check_exp
1740
    if (adjexp > ctx->emax) {
1741
1742
        if (mpd_iszerocoeff(dec)) {
inline
            
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_check_exp
inline
            
mpd_iszerocoeff inlined into _mpd_check_exp 
_mpd_check_exp
1743
            dec->exp = ctx->emax;
1744
            if (ctx->clamp) {
1745
                dec->exp -= (ctx->prec-1);
1746
            }
1747
            mpd_zerocoeff(dec);
inline
            
mpd_zerocoeff can be inlined into _mpd_check_exp with cost=120 (threshold=250) 
_mpd_check_exp
inline
            
mpd_zerocoeff inlined into _mpd_check_exp 
_mpd_check_exp
1748
            *status |= MPD_Clamped;
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
_mpd_check_exp
1749
            return;
1750
        }
1751
1752
        switch (ctx->round) {
1753
        case MPD_ROUND_HALF_UP: case MPD_ROUND_HALF_EVEN:
1754
        case MPD_ROUND_HALF_DOWN: case MPD_ROUND_UP:
1755
        case MPD_ROUND_TRUNC:
1756
            mpd_setspecial(dec, mpd_sign(dec), MPD_INF);
inline
                                
mpd_sign should always be inlined (cost=always) 
_mpd_check_exp
inline
                                
mpd_sign inlined into _mpd_check_exp 
_mpd_check_exp
inline
            
mpd_setspecial can be inlined into _mpd_check_exp with cost=120 (threshold=250) 
_mpd_check_exp
inline
            
mpd_setspecial inlined into _mpd_check_exp 
_mpd_check_exp
1757
            break;
1758
        case MPD_ROUND_DOWN: case MPD_ROUND_05UP:
1759
            mpd_qmaxcoeff(dec, ctx, status);
inline
            
mpd_qmaxcoeff too costly to inline (cost=250, threshold=250) 
_mpd_check_exp
inline
            
mpd_qmaxcoeff will not be inlined into _mpd_check_exp 
_mpd_check_exp
1760
            dec->exp = ctx->emax - ctx->prec + 1;
gvn
                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_check_exp
gvn
                                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_check_exp
1761
            break;
1762
        case MPD_ROUND_CEILING:
1763
            if (mpd_isnegative(dec)) {
inline
                
mpd_isnegative should always be inlined (cost=always) 
_mpd_check_exp
inline
                
mpd_isnegative inlined into _mpd_check_exp 
_mpd_check_exp
1764
                mpd_qmaxcoeff(dec, ctx, status);
inline
                
mpd_qmaxcoeff too costly to inline (cost=250, threshold=250) 
_mpd_check_exp
inline
                
mpd_qmaxcoeff will not be inlined into _mpd_check_exp 
_mpd_check_exp
1765
                dec->exp = ctx->emax - ctx->prec + 1;
gvn
                                
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_check_exp
gvn
                                            
load of type i64 not eliminated because it is clobbered by call 
_mpd_check_exp
1766
            }
1767
            else {
1768
                mpd_setspecial(dec, MPD_POS, MPD_INF);
inline
                
mpd_setspecial can be inlined into _mpd_check_exp with cost=115 (threshold=250) 
_mpd_check_exp
inline
                
mpd_setspecial inlined into _mpd_check_exp 
_mpd_check_exp
1769
            }
1770
            break;
1771
        case MPD_ROUND_FLOOR:
1772
            if (mpd_ispositive(dec)) {
inline
                
mpd_ispositive should always be inlined (cost=always) 
_mpd_check_exp
inline
                
mpd_ispositive inlined into _mpd_check_exp 
_mpd_check_exp
1773
                mpd_qmaxcoeff(dec, ctx, status);
inline
                
mpd_qmaxcoeff too costly to inline (cost=250, threshold=250) 
_mpd_check_exp
inline
                
mpd_qmaxcoeff will not be inlined into _mpd_check_exp 
_mpd_check_exp
1774
                dec->exp = ctx->emax - ctx->prec + 1;
gvn
                                
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_check_exp
gvn
                                            
load of type i64 not eliminated because it is clobbered by call 
_mpd_check_exp
1775
            }
1776
            else {
1777
                mpd_setspecial(dec, MPD_NEG, MPD_INF);
inline
                
mpd_setspecial can be inlined into _mpd_check_exp with cost=115 (threshold=250) 
_mpd_check_exp
inline
                
mpd_setspecial inlined into _mpd_check_exp 
_mpd_check_exp
1778
            }
1779
            break;
1780
        default: /* debug */
1781
            abort(); /* GCOV_NOT_REACHED */
inline
            
abort will not be inlined into _mpd_check_exp because its definition is unavailable 
_mpd_check_exp
1782
        }
1783
1784
        *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
_mpd_check_exp
1785
1786
    } /* fold down */
1787
    else if (ctx->clamp && dec->exp > mpd_etop(ctx)) {
inline
                                      
mpd_etop should always be inlined (cost=always) 
_mpd_check_exp
inline
                                      
mpd_etop inlined into _mpd_check_exp 
_mpd_check_exp
1788
        /* At this point adjexp=exp+digits-1 <= emax and exp > etop=emax-prec+1:
1789
         *   (1) shift = exp -emax+prec-1 > 0
1790
         *   (2) digits+shift = exp+digits-1 - emax + prec <= prec */
1791
        shift = dec->exp - mpd_etop(ctx);
inline
                           
mpd_etop should always be inlined (cost=always) 
_mpd_check_exp
inline
                           
mpd_etop inlined into _mpd_check_exp 
_mpd_check_exp
1792
        if (!mpd_qshiftl(dec, dec, shift, status)) {
inline
             
mpd_qshiftl too costly to inline (cost=320, threshold=250) 
_mpd_check_exp
inline
             
mpd_qshiftl will not be inlined into _mpd_check_exp 
_mpd_check_exp
1793
            return;
1794
        }
1795
        dec->exp -= shift;
gvn
                 
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_check_exp
1796
        *status |= MPD_Clamped;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
_mpd_check_exp
1797
        if (!mpd_iszerocoeff(dec) && adjexp < ctx->emin) {
inline
             
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_check_exp
inline
             
mpd_iszerocoeff inlined into _mpd_check_exp 
_mpd_check_exp
gvn
                                                   
load of type i64 not eliminated because it is clobbered by call 
_mpd_check_exp
1798
            /* Underflow is impossible, since exp < etiny=emin-prec+1
1799
             * and exp > etop=emax-prec+1 would imply emax < emin. */
1800
            *status |= MPD_Subnormal;
1801
        }
1802
    }
1803
    else if (adjexp < ctx->emin) {
1804
1805
        etiny = mpd_etiny(ctx);
inline
                
mpd_etiny should always be inlined (cost=always) 
_mpd_check_exp
inline
                
mpd_etiny inlined into _mpd_check_exp 
_mpd_check_exp
1806
1807
        if (mpd_iszerocoeff(dec)) {
inline
            
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_check_exp
inline
            
mpd_iszerocoeff inlined into _mpd_check_exp 
_mpd_check_exp
1808
            if (dec->exp < etiny) {
gvn
                     
load of type i64 eliminated in favor of load 
_mpd_check_exp
1809
                dec->exp = etiny;
1810
                mpd_zerocoeff(dec);
inline
                
mpd_zerocoeff can be inlined into _mpd_check_exp with cost=120 (threshold=250) 
_mpd_check_exp
inline
                
mpd_zerocoeff inlined into _mpd_check_exp 
_mpd_check_exp
1811
                *status |= MPD_Clamped;
gvn
                        
load of type i32 not eliminated because it is clobbered by call 
_mpd_check_exp
1812
            }
1813
            return;
1814
        }
1815
1816
        *status |= MPD_Subnormal;
1817
        if (dec->exp < etiny) {
1818
            /* At this point adjexp=exp+digits-1 < emin and exp < etiny=emin-prec+1:
1819
             *   (1) shift = emin-prec+1 - exp > 0
1820
             *   (2) digits-shift = exp+digits-1 - emin + prec < prec */
1821
            shift = etiny - dec->exp;
1822
            rnd = (int)mpd_qshiftr_inplace(dec, shift);
inline
                       
mpd_qshiftr_inplace too costly to inline (cost=475, threshold=250) 
_mpd_check_exp
inline
                       
mpd_qshiftr_inplace will not be inlined into _mpd_check_exp 
_mpd_check_exp
1823
            dec->exp = etiny;
1824
            /* We always have a spare digit in case of an increment. */
1825
            _mpd_apply_round_excess(dec, rnd, ctx, status);
inline
            
_mpd_apply_round_excess too costly to inline (cost=755, threshold=325) 
_mpd_check_exp
inline
            
_mpd_apply_round_excess will not be inlined into _mpd_check_exp 
_mpd_check_exp
1826
            *status |= MPD_Rounded;
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
_mpd_check_exp
1827
            if (rnd) {
1828
                *status |= (MPD_Inexact|MPD_Underflow);
1829
                if (mpd_iszerocoeff(dec)) {
inline
                    
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_check_exp
inline
                    
mpd_iszerocoeff inlined into _mpd_check_exp 
_mpd_check_exp
1830
                    mpd_zerocoeff(dec);
inline
                    
mpd_zerocoeff can be inlined into _mpd_check_exp with cost=120 (threshold=250) 
_mpd_check_exp
inline
                    
mpd_zerocoeff inlined into _mpd_check_exp 
_mpd_check_exp
1831
                    *status |= MPD_Clamped;
gvn
                            
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_check_exp
gvn
                            
load eliminated by PRE 
_mpd_check_exp
1832
                }
1833
            }
1834
        }
1835
        /* Case exp >= etiny=emin-prec+1:
1836
         *   (1) adjexp=exp+digits-1 < emin
1837
         *   (2) digits < emin-exp+1 <= prec */
1838
    }
1839
}
1840
1841
/* Transcendental functions do not always set Underflow reliably,
1842
 * since they only use as much precision as is necessary for correct
1843
 * rounding. If a result like 1.0000000000e-101 is finalized, there
1844
 * is no rounding digit that would trigger Underflow. But we can
1845
 * assume Inexact, so a short check suffices. */
1846
static inline void
1847
mpd_check_underflow(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
1848
{
1849
    if (mpd_adjexp(dec) < ctx->emin && !mpd_iszero(dec) &&
inline
        
mpd_adjexp should always be inlined (cost=always) 
mpd_check_underflow
inline
        
mpd_adjexp inlined into mpd_check_underflow 
mpd_check_underflow
inline
                                        
mpd_iszero should always be inlined (cost=always) 
mpd_check_underflow
inline
                                        
mpd_iszero inlined into mpd_check_underflow 
mpd_check_underflow
1850
        dec->exp < mpd_etiny(ctx)) {
inline
                   
mpd_etiny should always be inlined (cost=always) 
mpd_check_underflow
inline
                   
mpd_etiny inlined into mpd_check_underflow 
mpd_check_underflow
gvn
             
load of type i64 eliminated in favor of load 
mpd_check_underflow
1851
        *status |= MPD_Underflow;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qexp
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qln
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qlog10
1852
    }
1853
}
1854
1855
/* Check if a normal number must be rounded after the exponent has been checked. */
1856
static inline void
1857
_mpd_check_round(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
1858
{
1859
    mpd_uint_t rnd;
1860
    mpd_ssize_t shift;
1861
1862
    /* must handle specials: _mpd_check_exp() can produce infinities or NaNs */
1863
    if (mpd_isspecial(dec)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
_mpd_check_round
inline
        
mpd_isspecial inlined into _mpd_check_round 
_mpd_check_round
1864
        return;
1865
    }
1866
1867
    if (dec->digits > ctx->prec) {
gvn
             
load of type i64 not eliminated because it is clobbered by call 
mpd_qfinalize
gvn
                           
load of type i64 not eliminated because it is clobbered by call 
mpd_qfinalize
1868
        shift = dec->digits - ctx->prec;
1869
        rnd = mpd_qshiftr_inplace(dec, shift);
inline
              
mpd_qshiftr_inplace too costly to inline (cost=475, threshold=250) 
_mpd_check_round
inline
              
mpd_qshiftr_inplace will not be inlined into _mpd_check_round 
_mpd_check_round
inline
              
mpd_qshiftr_inplace too costly to inline (cost=475, threshold=250) 
mpd_qfinalize
inline
              
mpd_qshiftr_inplace will not be inlined into mpd_qfinalize 
mpd_qfinalize
1870
        dec->exp += shift;
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
_mpd_check_round
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
mpd_qfinalize
1871
        _mpd_apply_round(dec, rnd, ctx, status);
inline
        
_mpd_apply_round can be inlined into _mpd_check_round with cost=-14230 (threshold=325) 
_mpd_check_round
inline
        
_mpd_apply_round inlined into _mpd_check_round 
_mpd_check_round
1872
        *status |= MPD_Rounded;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
_mpd_check_round
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qfinalize
1873
        if (rnd) {
1874
            *status |= MPD_Inexact;
1875
        }
1876
    }
1877
}
1878
1879
/* Finalize all operations. */
1880
void
1881
mpd_qfinalize(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
1882
{
1883
    if (mpd_isspecial(result)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qfinalize
inline
        
mpd_isspecial inlined into mpd_qfinalize 
mpd_qfinalize
1884
        if (mpd_isnan(result)) {
inline
            
mpd_isnan should always be inlined (cost=always) 
mpd_qfinalize
inline
            
mpd_isnan inlined into mpd_qfinalize 
mpd_qfinalize
1885
            _mpd_fix_nan(result, ctx);
inline
            
_mpd_fix_nan too costly to inline (cost=630, threshold=625) 
mpd_qfinalize
inline
            
_mpd_fix_nan will not be inlined into mpd_qfinalize 
mpd_qfinalize
1886
        }
1887
        return;
1888
    }
1889
1890
    _mpd_check_exp(result, ctx, status);
inline
    
_mpd_check_exp too costly to inline (cost=815, threshold=812) 
mpd_qfinalize
inline
    
_mpd_check_exp will not be inlined into mpd_qfinalize 
mpd_qfinalize
1891
    _mpd_check_round(result, ctx, status);
inline
    
_mpd_check_round can be inlined into mpd_qfinalize with cost=-14060 (threshold=325) 
mpd_qfinalize
inline
    
_mpd_check_round inlined into mpd_qfinalize 
mpd_qfinalize
1892
}
1893
1894
1895
/******************************************************************************/
1896
/*                                 Copying                                    */
1897
/******************************************************************************/
1898
1899
/* Internal function: Copy a decimal, share data with src: USE WITH CARE! */
1900
static inline void
1901
_mpd_copy_shared(mpd_t *dest, const mpd_t *src)
1902
{
1903
    dest->flags = src->flags;
gvn
                       
load of type i8 eliminated in favor of load 
mpd_cmp_total
1904
    dest->exp = src->exp;
1905
    dest->digits = src->digits;
1906
    dest->len = src->len;
gvn
                     
load of type i64 eliminated in favor of load 
mpd_cmp_total
1907
    dest->alloc = src->alloc;
1908
    dest->data = src->data;
1909
1910
    mpd_set_shared_data(dest);
inline
    
mpd_set_shared_data should always be inlined (cost=always) 
_mpd_copy_shared
inline
    
mpd_set_shared_data inlined into _mpd_copy_shared 
_mpd_copy_shared
1911
}
1912
1913
/*
1914
 * Copy a decimal. In case of an error, status is set to MPD_Malloc_error.
1915
 */
1916
int
1917
mpd_qcopy(mpd_t *result, const mpd_t *a, uint32_t *status)
1918
{
1919
    if (result == a) return 1;
1920
1921
    if (!mpd_qresize(result, a->len, status)) {
inline
         
mpd_qresize should always be inlined (cost=always) 
mpd_qcopy
inline
         
mpd_qresize inlined into mpd_qcopy 
mpd_qcopy
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftn
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qshift
gvn
                                
load of type i64 eliminated in favor of load 
mpd_qminus
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qminus
gvn
                                
load of type i64 eliminated in favor of load 
mpd_qplus
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qplus
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
_mpd_qround_to_integral
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                                
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qexp
gvn
                                
load of type i64 eliminated in favor of load 
mpd_qexp
gvn
                                
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qln
gvn
                                
load of type i64 eliminated in favor of load 
mpd_qlog10
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax_mag
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin_mag
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                                
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qpow
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                                
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qsqrt
1922
        return 0;
1923
    }
1924
1925
    mpd_copy_flags(result, a);
inline
    
mpd_copy_flags should always be inlined (cost=always) 
mpd_qcopy
inline
    
mpd_copy_flags inlined into mpd_qcopy 
mpd_qcopy
1926
    result->exp = a->exp;
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qcopy
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftl
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qcheck_nan
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qcheck_nans
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qcopy_abs
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qcopy_negate
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qcopy_sign
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                     
load eliminated by PRE 
mpd_qscaleb
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftn
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qshift
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qminus
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qplus
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
_mpd_qround_to_integral
gvn
                     
load eliminated by PRE 
_mpd_qround_to_integral
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qdivmod
gvn
                     
load eliminated by PRE 
_mpd_qdivmod
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_uint
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qexp
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qexp
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
_mpd_qln
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qln
gvn
                     
load eliminated by PRE 
mpd_qln
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qlog10
gvn
                     
load eliminated by PRE 
mpd_qlog10
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax_mag
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin_mag
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qpow
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qcheck_3nans
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
_mpd_qrescale
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qsqrt
licm
                
hosting bitcast 
mpd_qpowmod
1927
    result->digits = a->digits;
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qcopy
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftl
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qcheck_nan
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qcheck_nans
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qcopy_abs
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qcopy_negate
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qcopy_sign
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                        
load eliminated by PRE 
mpd_qrotate
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftn
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qshift
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qminus
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qplus
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qround_to_integral
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qdivmod
gvn
                        
load eliminated by PRE 
_mpd_qdivmod
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_uint
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qexp
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qexp
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qln
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qln
gvn
                        
load eliminated by PRE 
mpd_qln
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qlog10
gvn
                        
load eliminated by PRE 
mpd_qlog10
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax_mag
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin_mag
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qpow
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qcheck_3nans
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qrescale
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qsqrt
1928
    result->len = a->len;
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qcopy
gvn
                     
load eliminated by PRE 
mpd_qcopy
1929
    memcpy(result->data, a->data, a->len * (sizeof *result->data));
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qcopy
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qcopy
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qcopy
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qshiftl
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qshiftl
gvn
                            
load eliminated by PRE 
mpd_qshiftl
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qshiftl
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qcheck_nan
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qcheck_nan
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qcheck_nan
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qcheck_nans
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qcheck_nans
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qcheck_nans
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qcopy_abs
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qcopy_abs
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qcopy_abs
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qcopy_negate
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qcopy_negate
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qcopy_negate
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qcopy_sign
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qcopy_sign
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qcopy_sign
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                            
load eliminated by PRE 
mpd_qshiftr
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qshiftr
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qrotate
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qrotate
gvn
                                     
load of type i64 eliminated in favor of phi 
mpd_qrotate
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qscaleb
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qshiftn
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qshiftn
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qshiftn
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qshift
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qshift
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qshift
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qminus
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qminus
gvn
                            
load eliminated by PRE 
mpd_qminus
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qminus
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qplus
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qplus
gvn
                            
load eliminated by PRE 
mpd_qplus
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qplus
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
_mpd_qround_to_integral
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
_mpd_qround_to_integral
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
_mpd_qround_to_integral
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
_mpd_base_ndivmod
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
_mpd_qdivmod
gvn
                            
load eliminated by PRE 
_mpd_qdivmod
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
_mpd_qdivmod
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qdivmod
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qdivmod
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
_mpd_qpow_uint
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
_mpd_qpow_uint
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
_mpd_qpow_uint
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
_mpd_qexp
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
_mpd_qexp
gvn
                                     
load of type i64 not eliminated because it is clobbered by store 
_mpd_qexp
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qexp
gvn
                   
load eliminated by PRE 
mpd_qexp
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qexp
gvn
                            
load eliminated by PRE 
mpd_qexp
gvn
                                     
load of type i64 eliminated in favor of phi 
mpd_qexp
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
_mpd_qln
gvn
                   
load eliminated by PRE 
_mpd_qln
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
_mpd_qln
gvn
                                     
load of type i64 eliminated in favor of phi 
_mpd_qln
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qln
gvn
                   
load eliminated by PRE 
mpd_qln
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qln
gvn
                                     
load of type i64 eliminated in favor of phi 
mpd_qln
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                   
load eliminated by PRE 
mpd_qlog10
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                            
load eliminated by PRE 
mpd_qlog10
gvn
                                     
load of type i64 eliminated in favor of phi 
mpd_qlog10
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qmax
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qmax
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qmax
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qmax_mag
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qmax_mag
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qmax_mag
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qmin
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qmin
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qmin
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qmin_mag
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qmin_mag
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qmin_mag
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qnext_minus
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qnext_minus
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qnext_plus
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qnext_plus
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qnext_toward
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qnext_toward
gvn
                   
load of type i64* not eliminated because it is clobbered by call 
_mpd_qpow_mpd
gvn
    
load of type i64 not eliminated because it is clobbered by store 
_mpd_qpow_mpd
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                   
load eliminated by PRE 
_mpd_qpow_int
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
                                     
load of type i64 eliminated in favor of phi 
_mpd_qpow_int
gvn
    
load of type i64 not eliminated because it is clobbered by store 
_mpd_qpow_int
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                   
load eliminated by PRE 
_mpd_qpow_real
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                                     
load of type i64 eliminated in favor of phi 
_mpd_qpow_real
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qpow
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qpow
gvn
    
load of type i64 not eliminated because it is clobbered by store 
mpd_qpow
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qcheck_3nans
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qcheck_3nans
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qcheck_3nans
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
_mpd_qrescale
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
_mpd_qrescale
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
_mpd_qrescale
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qrem
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qrem
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qrem
gvn
                   
load of type i64* not eliminated because it is clobbered by call 
_mpd_qpowmod_uint
gvn
    
load of type i64 not eliminated because it is clobbered by store 
_mpd_qpowmod_uint
licm
                   
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpowmod
licm
    
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpowmod
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qpowmod
gvn
    
load of type i64 not eliminated because it is clobbered by call 
mpd_qpowmod
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qquantize
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qquantize
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qreduce
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qreduce
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qrem_near
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qrem_near
gvn
                   
load eliminated by PRE 
mpd_qrem_near
gvn
                            
load eliminated by PRE 
mpd_qrem_near
gvn
                                     
load of type i64 eliminated in favor of phi 
mpd_qrem_near
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qsqrt
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qsqrt
gvn
                                     
load of type i64 eliminated in favor of phi 
mpd_qsqrt
1930
1931
    return 1;
1932
}
1933
1934
/*
1935
 * Copy to a decimal with a static buffer. The caller has to make sure that
1936
 * the buffer is big enough. Cannot fail.
1937
 */
1938
static void
1939
mpd_qcopy_static(mpd_t *result, const mpd_t *a)
1940
{
1941
    if (result == a) return;
1942
1943
    memcpy(result->data, a->data, a->len * (sizeof *result->data));
gvn
                            
load of type i8* eliminated in favor of inttoptr 
mpd_qsshiftr
gvn
                   
load of type i8* eliminated in favor of inttoptr 
_mpd_qget_uint
1944
1945
    mpd_copy_flags(result, a);
inline
    
mpd_copy_flags should always be inlined (cost=always) 
mpd_qcopy_static
inline
    
mpd_copy_flags inlined into mpd_qcopy_static 
mpd_qcopy_static
1946
    result->exp = a->exp;
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qcopy_static
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qsshiftr
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qget_uint
1947
    result->digits = a->digits;
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qcopy_static
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qsshiftr
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qget_uint
1948
    result->len = a->len;
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qcopy_static
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qsshiftr
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qget_uint
1949
}
1950
1951
/*
1952
 * Return a newly allocated copy of the operand. In case of an error,
1953
 * status is set to MPD_Malloc_error and the return value is NULL.
1954
 */
1955
mpd_t *
1956
mpd_qncopy(const mpd_t *a)
1957
{
1958
    mpd_t *result;
1959
1960
    if ((result = mpd_qnew_size(a->len)) == NULL) {
inline
                  
mpd_qnew_size will not be inlined into mpd_qncopy because its definition is unavailable 
mpd_qncopy
gvn
                                   
load of type i64 eliminated in favor of load 
_mpd_qinvroot
1961
        return NULL;
1962
    }
1963
    memcpy(result->data, a->data, a->len * (sizeof *result->data));
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qncopy
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qncopy
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qncopy
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qfma
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qfma
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qfma
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qinvroot
gvn
                   
load of type i8* not eliminated because it is clobbered by call 
mpd_qinvroot
gvn
                            
load of type i8* not eliminated because it is clobbered by call 
mpd_qinvroot
gvn
                                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qinvroot
1964
    mpd_copy_flags(result, a);
inline
    
mpd_copy_flags should always be inlined (cost=always) 
mpd_qncopy
inline
    
mpd_copy_flags inlined into mpd_qncopy 
mpd_qncopy
1965
    result->exp = a->exp;
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qncopy
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qfma
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qinvroot
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qinvroot
1966
    result->digits = a->digits;
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qncopy
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qfma
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qinvroot
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qinvroot
1967
    result->len = a->len;
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qncopy
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qfma
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qinvroot
1968
1969
    return result;
1970
}
1971
1972
/*
1973
 * Copy a decimal and set the sign to positive. In case of an error, the
1974
 * status is set to MPD_Malloc_error.
1975
 */
1976
int
1977
mpd_qcopy_abs(mpd_t *result, const mpd_t *a, uint32_t *status)
1978
{
1979
    if (!mpd_qcopy(result, a, status)) {
inline
         
mpd_qcopy can be inlined into mpd_qcopy_abs with cost=215 (threshold=250) 
mpd_qcopy_abs
inline
         
mpd_qcopy inlined into mpd_qcopy_abs 
mpd_qcopy_abs
1980
        return 0;
1981
    }
1982
    mpd_set_positive(result);
inline
    
mpd_set_positive should always be inlined (cost=always) 
mpd_qcopy_abs
inline
    
mpd_set_positive inlined into mpd_qcopy_abs 
mpd_qcopy_abs
1983
    return 1;
1984
}
1985
1986
/*
1987
 * Copy a decimal and negate the sign. In case of an error, the
1988
 * status is set to MPD_Malloc_error.
1989
 */
1990
int
1991
mpd_qcopy_negate(mpd_t *result, const mpd_t *a, uint32_t *status)
1992
{
1993
    if (!mpd_qcopy(result, a, status)) {
inline
         
mpd_qcopy can be inlined into mpd_qcopy_negate with cost=215 (threshold=250) 
mpd_qcopy_negate
inline
         
mpd_qcopy inlined into mpd_qcopy_negate 
mpd_qcopy_negate
1994
        return 0;
1995
    }
1996
    _mpd_negate(result);
inline
    
_mpd_negate can be inlined into mpd_qcopy_negate with cost=-15020 (threshold=487) 
mpd_qcopy_negate
inline
    
_mpd_negate inlined into mpd_qcopy_negate 
mpd_qcopy_negate
1997
    return 1;
1998
}
1999
2000
/*
2001
 * Copy a decimal, setting the sign of the first operand to the sign of the
2002
 * second operand. In case of an error, the status is set to MPD_Malloc_error.
2003
 */
2004
int
2005
mpd_qcopy_sign(mpd_t *result, const mpd_t *a, const mpd_t *b, uint32_t *status)
2006
{
2007
    uint8_t sign_b = mpd_sign(b); /* result may equal b! */
inline
                     
mpd_sign should always be inlined (cost=always) 
mpd_qcopy_sign
inline
                     
mpd_sign inlined into mpd_qcopy_sign 
mpd_qcopy_sign
2008
2009
    if (!mpd_qcopy(result, a, status)) {
inline
         
mpd_qcopy can be inlined into mpd_qcopy_sign with cost=215 (threshold=250) 
mpd_qcopy_sign
inline
         
mpd_qcopy inlined into mpd_qcopy_sign 
mpd_qcopy_sign
2010
        return 0;
2011
    }
2012
    mpd_set_sign(result, sign_b);
inline
    
mpd_set_sign should always be inlined (cost=always) 
mpd_qcopy_sign
inline
    
mpd_set_sign inlined into mpd_qcopy_sign 
mpd_qcopy_sign
2013
    return 1;
2014
}
2015
2016
2017
/******************************************************************************/
2018
/*                                Comparisons                                 */
2019
/******************************************************************************/
2020
2021
/*
2022
 * For all functions that compare two operands and return an int the usual
2023
 * convention applies to the return value:
2024
 *
2025
 * -1 if op1 < op2
2026
 *  0 if op1 == op2
2027
 *  1 if op1 > op2
2028
 *
2029
 *  INT_MAX for error
2030
 */
2031
2032
2033
/* Convenience macro. If a and b are not equal, return from the calling
2034
 * function with the correct comparison value. */
2035
#define CMP_EQUAL_OR_RETURN(a, b)  \
2036
        if (a != b) {              \
2037
                if (a < b) {       \
2038
                        return -1; \
2039
                }                  \
2040
                return 1;          \
2041
        }
2042
2043
/*
2044
 * Compare the data of big and small. This function does the equivalent
2045
 * of first shifting small to the left and then comparing the data of
2046
 * big and small, except that no allocation for the left shift is needed.
2047
 */
2048
static int
2049
_mpd_basecmp(mpd_uint_t *big, mpd_uint_t *small, mpd_size_t n, mpd_size_t m,
2050
             mpd_size_t shift)
2051
{
2052
#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
2053
    /* spurious uninitialized warnings */
2054
    mpd_uint_t l=l, lprev=lprev, h=h;
2055
#else
2056
    mpd_uint_t l, lprev, h;
2057
#endif
2058
    mpd_uint_t q, r;
2059
    mpd_uint_t ph, x;
2060
2061
    assert(m > 0 && n >= m && shift > 0);
2062
2063
    _mpd_div_word(&q, &r, (mpd_uint_t)shift, MPD_RDIGITS);
inline
    
_mpd_div_word can be inlined into _mpd_basecmp with cost=-35 (threshold=487) 
_mpd_basecmp
inline
    
_mpd_div_word inlined into _mpd_basecmp 
_mpd_basecmp
2064
2065
    if (r != 0) {
2066
2067
        ph = mpd_pow10[r];
2068
2069
        --m; --n;
2070
        _mpd_divmod_pow10(&h, &lprev, small[m--], MPD_RDIGITS-r);
inline
        
_mpd_divmod_pow10 too costly to inline (cost=385, threshold=325) 
_mpd_basecmp
inline
        
_mpd_divmod_pow10 will not be inlined into _mpd_basecmp 
_mpd_basecmp
2071
        if (h != 0) {
gvn
            
load of type i64 not eliminated because it is clobbered by call 
_mpd_basecmp
2072
            CMP_EQUAL_OR_RETURN(big[n], h)
gvn
            
load of type i64 not eliminated because it is clobbered by call 
_mpd_basecmp
2073
            --n;
2074
        }
2075
        for (; m != MPD_SIZE_MAX; m--,n--) {
loop-vectorize
        
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_basecmp
loop-vectorize
        
loop not vectorized 
_mpd_basecmp
2076
            _mpd_divmod_pow10(&h, &l, small[m], MPD_RDIGITS-r);
inline
            
_mpd_divmod_pow10 too costly to inline (cost=385, threshold=325) 
_mpd_basecmp
inline
            
_mpd_divmod_pow10 will not be inlined into _mpd_basecmp 
_mpd_basecmp
2077
            x = ph * lprev + h;
licm
                     
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_basecmp
licm
                             
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_basecmp
gvn
                             
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_basecmp
2078
            CMP_EQUAL_OR_RETURN(big[n], x)
gvn
            
load of type i64 not eliminated because it is clobbered by call 
_mpd_basecmp
2079
            lprev = l;
licm
                    
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_basecmp
licm
                  
Moving accesses to memory location out of the loop 
_mpd_basecmp
gvn
                  
load of type i64 not eliminated because it is clobbered by call 
_mpd_basecmp
gvn
                    
load of type i64 not eliminated because it is clobbered by call 
_mpd_basecmp
2080
        }
2081
        x = ph * lprev;
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
_mpd_basecmp
gvn
                 
load eliminated by PRE 
_mpd_basecmp
2082
        CMP_EQUAL_OR_RETURN(big[q], x)
gvn
        
load of type i64 not eliminated because it is clobbered by call 
_mpd_basecmp
2083
    }
2084
    else {
2085
        while (--m != MPD_SIZE_MAX) {
loop-vectorize
        
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_basecmp
loop-vectorize
        
loop not vectorized 
_mpd_basecmp
2086
            CMP_EQUAL_OR_RETURN(big[m+q], small[m])
2087
        }
2088
    }
2089
2090
    return !_mpd_isallzero(big, q);
inline
            
_mpd_isallzero can be inlined into _mpd_basecmp with cost=-5 (threshold=325) 
_mpd_basecmp
inline
            
_mpd_isallzero inlined into _mpd_basecmp 
_mpd_basecmp
2091
}
2092
2093
/* Compare two decimals with the same adjusted exponent. */
2094
static int
2095
_mpd_cmp_same_adjexp(const mpd_t *a, const mpd_t *b)
2096
{
2097
    mpd_ssize_t shift, i;
2098
2099
    if (a->exp != b->exp) {
2100
        /* Cannot wrap: a->exp + a->digits = b->exp + b->digits, so
2101
         * a->exp - b->exp = b->digits - a->digits. */
2102
        shift = a->exp - b->exp;
2103
        if (shift > 0) {
2104
            return -1 * _mpd_basecmp(b->data, a->data, b->len, a->len, shift);
inline
                        
_mpd_basecmp too costly to inline (cost=410, threshold=250) 
_mpd_cmp_same_adjexp
inline
                        
_mpd_basecmp will not be inlined into _mpd_cmp_same_adjexp 
_mpd_cmp_same_adjexp
inline
                        
_mpd_basecmp too costly to inline (cost=410, threshold=250) 
_mpd_cmp
inline
                        
_mpd_basecmp will not be inlined into _mpd_cmp 
_mpd_cmp
inline
                        
_mpd_basecmp too costly to inline (cost=410, threshold=250) 
_mpd_cmp_abs
inline
                        
_mpd_basecmp will not be inlined into _mpd_cmp_abs 
_mpd_cmp_abs
2105
        }
2106
        else {
2107
            return _mpd_basecmp(a->data, b->data, a->len, b->len, -shift);
inline
                   
_mpd_basecmp too costly to inline (cost=410, threshold=250) 
_mpd_cmp_same_adjexp
inline
                   
_mpd_basecmp will not be inlined into _mpd_cmp_same_adjexp 
_mpd_cmp_same_adjexp
inline
                   
_mpd_basecmp too costly to inline (cost=410, threshold=250) 
_mpd_cmp
inline
                   
_mpd_basecmp will not be inlined into _mpd_cmp 
_mpd_cmp
inline
                   
_mpd_basecmp too costly to inline (cost=410, threshold=250) 
_mpd_cmp_abs
inline
                   
_mpd_basecmp will not be inlined into _mpd_cmp_abs 
_mpd_cmp_abs
2108
        }
2109
    }
2110
2111
    /*
2112
     * At this point adjexp(a) == adjexp(b) and a->exp == b->exp,
2113
     * so a->digits == b->digits, therefore a->len == b->len.
2114
     */
2115
    for (i = a->len-1; i >= 0; --i) {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_cmp
loop-vectorize
    
loop not vectorized 
_mpd_cmp
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_cmp_abs
loop-vectorize
    
loop not vectorized 
_mpd_cmp_abs
2116
        CMP_EQUAL_OR_RETURN(a->data[i], b->data[i])
licm
        
hosting getelementptr 
_mpd_cmp_same_adjexp
licm
        
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_cmp_same_adjexp
licm
        
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_cmp
gvn
        
load of type i64* eliminated in favor of phi 
_mpd_cmp
licm
        
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_cmp_abs
gvn
        
load of type i64* eliminated in favor of phi 
_mpd_cmp_abs
2117
    }
2118
2119
    return 0;
2120
}
2121
2122
/* Compare two numerical values. */
2123
static int
2124
_mpd_cmp(const mpd_t *a, const mpd_t *b)
2125
{
2126
    mpd_ssize_t adjexp_a, adjexp_b;
2127
2128
    /* equal pointers */
2129
    if (a == b) {
2130
        return 0;
2131
    }
2132
2133
    /* infinities */
2134
    if (mpd_isinfinite(a)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
_mpd_cmp
inline
        
mpd_isinfinite inlined into _mpd_cmp 
_mpd_cmp
2135
        if (mpd_isinfinite(b)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
_mpd_cmp
inline
            
mpd_isinfinite inlined into _mpd_cmp 
_mpd_cmp
2136
            return mpd_isnegative(b) - mpd_isnegative(a);
inline
                   
mpd_isnegative should always be inlined (cost=always) 
_mpd_cmp
inline
                   
mpd_isnegative inlined into _mpd_cmp 
_mpd_cmp
inline
                                       
mpd_isnegative should always be inlined (cost=always) 
_mpd_cmp
inline
                                       
mpd_isnegative inlined into _mpd_cmp 
_mpd_cmp
2137
        }
2138
        return mpd_arith_sign(a);
inline
               
mpd_arith_sign should always be inlined (cost=always) 
_mpd_cmp
inline
               
mpd_arith_sign inlined into _mpd_cmp 
_mpd_cmp
2139
    }
2140
    if (mpd_isinfinite(b)) {
2141
        return -mpd_arith_sign(b);
inline
                
mpd_arith_sign should always be inlined (cost=always) 
_mpd_cmp
inline
                
mpd_arith_sign inlined into _mpd_cmp 
_mpd_cmp
2142
    }
2143
2144
    /* zeros */
2145
    if (mpd_iszerocoeff(a)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_cmp
inline
        
mpd_iszerocoeff inlined into _mpd_cmp 
_mpd_cmp
2146
        if (mpd_iszerocoeff(b)) {
inline
            
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_cmp
inline
            
mpd_iszerocoeff inlined into _mpd_cmp 
_mpd_cmp
2147
            return 0;
2148
        }
2149
        return -mpd_arith_sign(b);
inline
                
mpd_arith_sign should always be inlined (cost=always) 
_mpd_cmp
inline
                
mpd_arith_sign inlined into _mpd_cmp 
_mpd_cmp
2150
    }
2151
    if (mpd_iszerocoeff(b)) {
2152
        return mpd_arith_sign(a);
inline
               
mpd_arith_sign should always be inlined (cost=always) 
_mpd_cmp
inline
               
mpd_arith_sign inlined into _mpd_cmp 
_mpd_cmp
2153
    }
2154
2155
    /* different signs */
2156
    if (mpd_sign(a) != mpd_sign(b)) {
inline
                       
mpd_sign should always be inlined (cost=always) 
_mpd_cmp
inline
                       
mpd_sign inlined into _mpd_cmp 
_mpd_cmp
inline
        
mpd_sign should always be inlined (cost=always) 
_mpd_cmp
inline
        
mpd_sign inlined into _mpd_cmp 
_mpd_cmp
2157
        return mpd_sign(b) - mpd_sign(a);
inline
                             
mpd_sign should always be inlined (cost=always) 
_mpd_cmp
inline
                             
mpd_sign inlined into _mpd_cmp 
_mpd_cmp
inline
               
mpd_sign should always be inlined (cost=always) 
_mpd_cmp
inline
               
mpd_sign inlined into _mpd_cmp 
_mpd_cmp
2158
    }
2159
2160
    /* different adjusted exponents */
2161
    adjexp_a = mpd_adjexp(a);
inline
               
mpd_adjexp should always be inlined (cost=always) 
_mpd_cmp
inline
               
mpd_adjexp inlined into _mpd_cmp 
_mpd_cmp
2162
    adjexp_b = mpd_adjexp(b);
inline
               
mpd_adjexp should always be inlined (cost=always) 
_mpd_cmp
inline
               
mpd_adjexp inlined into _mpd_cmp 
_mpd_cmp
2163
    if (adjexp_a != adjexp_b) {
2164
        if (adjexp_a < adjexp_b) {
2165
            return -1 * mpd_arith_sign(a);
inline
                        
mpd_arith_sign should always be inlined (cost=always) 
_mpd_cmp
inline
                        
mpd_arith_sign inlined into _mpd_cmp 
_mpd_cmp
2166
        }
2167
        return mpd_arith_sign(a);
2168
    }
2169
2170
    /* same adjusted exponents */
2171
    return _mpd_cmp_same_adjexp(a, b) * mpd_arith_sign(a);
inline
                                        
mpd_arith_sign should always be inlined (cost=always) 
_mpd_cmp
inline
                                        
mpd_arith_sign inlined into _mpd_cmp 
_mpd_cmp
inline
           
_mpd_cmp_same_adjexp can be inlined into _mpd_cmp with cost=200 (threshold=250) 
_mpd_cmp
inline
           
_mpd_cmp_same_adjexp inlined into _mpd_cmp 
_mpd_cmp
2172
}
2173
2174
/* Compare the absolutes of two numerical values. */
2175
static int
2176
_mpd_cmp_abs(const mpd_t *a, const mpd_t *b)
2177
{
2178
    mpd_ssize_t adjexp_a, adjexp_b;
2179
2180
    /* equal pointers */
2181
    if (a == b) {
2182
        return 0;
2183
    }
2184
2185
    /* infinities */
2186
    if (mpd_isinfinite(a)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
_mpd_cmp_abs
inline
        
mpd_isinfinite inlined into _mpd_cmp_abs 
_mpd_cmp_abs
2187
        if (mpd_isinfinite(b)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
_mpd_cmp_abs
inline
            
mpd_isinfinite inlined into _mpd_cmp_abs 
_mpd_cmp_abs
2188
            return 0;
2189
        }
2190
        return 1;
2191
    }
2192
    if (mpd_isinfinite(b)) {
2193
        return -1;
2194
    }
2195
2196
    /* zeros */
2197
    if (mpd_iszerocoeff(a)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_cmp_abs
inline
        
mpd_iszerocoeff inlined into _mpd_cmp_abs 
_mpd_cmp_abs
2198
        if (mpd_iszerocoeff(b)) {
inline
            
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_cmp_abs
inline
            
mpd_iszerocoeff inlined into _mpd_cmp_abs 
_mpd_cmp_abs
2199
            return 0;
2200
        }
2201
        return -1;
2202
    }
2203
    if (mpd_iszerocoeff(b)) {
2204
        return 1;
2205
    }
2206
2207
    /* different adjusted exponents */
2208
    adjexp_a = mpd_adjexp(a);
inline
               
mpd_adjexp should always be inlined (cost=always) 
_mpd_cmp_abs
inline
               
mpd_adjexp inlined into _mpd_cmp_abs 
_mpd_cmp_abs
2209
    adjexp_b = mpd_adjexp(b);
inline
               
mpd_adjexp should always be inlined (cost=always) 
_mpd_cmp_abs
inline
               
mpd_adjexp inlined into _mpd_cmp_abs 
_mpd_cmp_abs
2210
    if (adjexp_a != adjexp_b) {
2211
        if (adjexp_a < adjexp_b) {
2212
            return -1;
2213
        }
2214
        return 1;
2215
    }
2216
2217
    /* same adjusted exponents */
2218
    return _mpd_cmp_same_adjexp(a, b);
inline
           
_mpd_cmp_same_adjexp can be inlined into _mpd_cmp_abs with cost=-14800 (threshold=250) 
_mpd_cmp_abs
inline
           
_mpd_cmp_same_adjexp inlined into _mpd_cmp_abs 
_mpd_cmp_abs
2219
}
2220
2221
/* Compare two values and return an integer result. */
2222
int
2223
mpd_qcmp(const mpd_t *a, const mpd_t *b, uint32_t *status)
2224
{
2225
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qcmp
inline
        
mpd_isspecial inlined into mpd_qcmp 
mpd_qcmp
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qcmp
inline
                            
mpd_isspecial inlined into mpd_qcmp 
mpd_qcmp
2226
        if (mpd_isnan(a) || mpd_isnan(b)) {
inline
                            
mpd_isnan should always be inlined (cost=always) 
mpd_qcmp
inline
                            
mpd_isnan inlined into mpd_qcmp 
mpd_qcmp
inline
            
mpd_isnan should always be inlined (cost=always) 
mpd_qcmp
inline
            
mpd_isnan inlined into mpd_qcmp 
mpd_qcmp
2227
            *status |= MPD_Invalid_operation;
licm
                    
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
mpd_qexp
licm
                    
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qln
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
mpd_qln
licm
                    
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qlog10
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
mpd_qlog10
2228
            return INT_MAX;
2229
        }
2230
    }
2231
2232
    return _mpd_cmp(a, b);
inline
           
_mpd_cmp too costly to inline (cost=550, threshold=250) 
mpd_qcmp
inline
           
_mpd_cmp will not be inlined into mpd_qcmp 
mpd_qcmp
inline
           
_mpd_cmp too costly to inline (cost=525, threshold=250) 
mpd_qexp
inline
           
_mpd_cmp will not be inlined into mpd_qexp 
mpd_qexp
inline
           
_mpd_cmp too costly to inline (cost=525, threshold=250) 
mpd_qln
inline
           
_mpd_cmp will not be inlined into mpd_qln 
mpd_qln
inline
           
_mpd_cmp too costly to inline (cost=525, threshold=250) 
mpd_qlog10
inline
           
_mpd_cmp will not be inlined into mpd_qlog10 
mpd_qlog10
2233
}
2234
2235
/*
2236
 * Compare a and b, convert the usual integer result to a decimal and
2237
 * store it in 'result'. For convenience, the integer result of the comparison
2238
 * is returned. Comparisons involving NaNs return NaN/INT_MAX.
2239
 */
2240
int
2241
mpd_qcompare(mpd_t *result, const mpd_t *a, const mpd_t *b,
2242
             const mpd_context_t *ctx, uint32_t *status)
2243
{
2244
    int c;
2245
2246
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qcompare
inline
        
mpd_isspecial inlined into mpd_qcompare 
mpd_qcompare
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qcompare
inline
                            
mpd_isspecial inlined into mpd_qcompare 
mpd_qcompare
2247
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qcompare
inline
            
mpd_qcheck_nans will not be inlined into mpd_qcompare 
mpd_qcompare
2248
            return INT_MAX;
2249
        }
2250
    }
2251
2252
    c = _mpd_cmp(a, b);
inline
        
_mpd_cmp too costly to inline (cost=550, threshold=250) 
mpd_qcompare
inline
        
_mpd_cmp will not be inlined into mpd_qcompare 
mpd_qcompare
2253
    _settriple(result, (c < 0), (c != 0), 0);
inline
    
_settriple can be inlined into mpd_qcompare with cost=185 (threshold=250) 
mpd_qcompare
inline
    
_settriple inlined into mpd_qcompare 
mpd_qcompare
2254
    return c;
2255
}
2256
2257
/* Same as mpd_compare(), but signal for all NaNs, i.e. also for quiet NaNs. */
2258
int
2259
mpd_qcompare_signal(mpd_t *result, const mpd_t *a, const mpd_t *b,
2260
                    const mpd_context_t *ctx, uint32_t *status)
2261
{
2262
    int c;
2263
2264
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qcompare_signal
inline
        
mpd_isspecial inlined into mpd_qcompare_signal 
mpd_qcompare_signal
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qcompare_signal
inline
                            
mpd_isspecial inlined into mpd_qcompare_signal 
mpd_qcompare_signal
2265
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qcompare_signal
inline
            
mpd_qcheck_nans will not be inlined into mpd_qcompare_signal 
mpd_qcompare_signal
2266
            *status |= MPD_Invalid_operation;
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
mpd_qcompare_signal
2267
            return INT_MAX;
2268
        }
2269
    }
2270
2271
    c = _mpd_cmp(a, b);
inline
        
_mpd_cmp too costly to inline (cost=550, threshold=250) 
mpd_qcompare_signal
inline
        
_mpd_cmp will not be inlined into mpd_qcompare_signal 
mpd_qcompare_signal
2272
    _settriple(result, (c < 0), (c != 0), 0);
inline
    
_settriple can be inlined into mpd_qcompare_signal with cost=185 (threshold=250) 
mpd_qcompare_signal
inline
    
_settriple inlined into mpd_qcompare_signal 
mpd_qcompare_signal
2273
    return c;
2274
}
2275
2276
/* Compare the operands using a total order. */
2277
int
2278
mpd_cmp_total(const mpd_t *a, const mpd_t *b)
2279
{
2280
    mpd_t aa, bb;
2281
    int nan_a, nan_b;
2282
    int c;
2283
2284
    if (mpd_sign(a) != mpd_sign(b)) {
inline
        
mpd_sign should always be inlined (cost=always) 
mpd_cmp_total
inline
        
mpd_sign inlined into mpd_cmp_total 
mpd_cmp_total
inline
                       
mpd_sign should always be inlined (cost=always) 
mpd_cmp_total
inline
                       
mpd_sign inlined into mpd_cmp_total 
mpd_cmp_total
2285
        return mpd_sign(b) - mpd_sign(a);
inline
               
mpd_sign should always be inlined (cost=always) 
mpd_cmp_total
inline
               
mpd_sign inlined into mpd_cmp_total 
mpd_cmp_total
inline
                             
mpd_sign should always be inlined (cost=always) 
mpd_cmp_total
inline
                             
mpd_sign inlined into mpd_cmp_total 
mpd_cmp_total
2286
    }
2287
2288
2289
    if (mpd_isnan(a)) {
inline
        
mpd_isnan should always be inlined (cost=always) 
mpd_cmp_total
inline
        
mpd_isnan inlined into mpd_cmp_total 
mpd_cmp_total
2290
        c = 1;
2291
        if (mpd_isnan(b)) {
2292
            nan_a = (mpd_isqnan(a)) ? 1 : 0;
inline
                     
mpd_isqnan should always be inlined (cost=always) 
mpd_cmp_total
inline
                     
mpd_isqnan inlined into mpd_cmp_total 
mpd_cmp_total
2293
            nan_b = (mpd_isqnan(b)) ? 1 : 0;
inline
                     
mpd_isqnan should always be inlined (cost=always) 
mpd_cmp_total
inline
                     
mpd_isqnan inlined into mpd_cmp_total 
mpd_cmp_total
2294
            if (nan_b == nan_a) {
2295
                if (a->len > 0 && b->len > 0) {
2296
                    _mpd_copy_shared(&aa, a);
inline
                    
_mpd_copy_shared can be inlined into mpd_cmp_total with cost=0 (threshold=487) 
mpd_cmp_total
inline
                    
_mpd_copy_shared inlined into mpd_cmp_total 
mpd_cmp_total
2297
                    _mpd_copy_shared(&bb, b);
inline
                    
_mpd_copy_shared can be inlined into mpd_cmp_total with cost=0 (threshold=487) 
mpd_cmp_total
inline
                    
_mpd_copy_shared inlined into mpd_cmp_total 
mpd_cmp_total
2298
                    aa.exp = bb.exp = 0;
2299
                    /* compare payload */
2300
                    c = _mpd_cmp_abs(&aa, &bb);
inline
                        
_mpd_cmp_abs too costly to inline (cost=335, threshold=250) 
mpd_cmp_total
inline
                        
_mpd_cmp_abs will not be inlined into mpd_cmp_total 
mpd_cmp_total
2301
                }
2302
                else {
2303
                    c = (a->len > 0) - (b->len > 0);
2304
                }
2305
            }
2306
            else {
2307
                c = nan_a - nan_b;
2308
            }
2309
        }
2310
    }
2311
    else if (mpd_isnan(b)) {
inline
             
mpd_isnan should always be inlined (cost=always) 
mpd_cmp_total
inline
             
mpd_isnan inlined into mpd_cmp_total 
mpd_cmp_total
2312
        c = -1;
2313
    }
2314
    else {
2315
        c = _mpd_cmp_abs(a, b);
inline
            
_mpd_cmp_abs too costly to inline (cost=360, threshold=250) 
mpd_cmp_total
inline
            
_mpd_cmp_abs will not be inlined into mpd_cmp_total 
mpd_cmp_total
2316
        if (c == 0 && a->exp != b->exp) {
gvn
                         
load of type i64 not eliminated because it is clobbered by call 
mpd_cmp_total
gvn
                                   
load of type i64 not eliminated because it is clobbered by call 
mpd_cmp_total
2317
            c = (a->exp < b->exp) ? -1 : 1;
2318
        }
2319
    }
2320
2321
    return c * mpd_arith_sign(a);
inline
               
mpd_arith_sign should always be inlined (cost=always) 
mpd_cmp_total
inline
               
mpd_arith_sign inlined into mpd_cmp_total 
mpd_cmp_total
2322
}
2323
2324
/*
2325
 * Compare a and b according to a total order, convert the usual integer result
2326
 * to a decimal and store it in 'result'. For convenience, the integer result
2327
 * of the comparison is returned.
2328
 */
2329
int
2330
mpd_compare_total(mpd_t *result, const mpd_t *a, const mpd_t *b)
2331
{
2332
    int c;
2333
2334
    c = mpd_cmp_total(a, b);
inline
        
mpd_cmp_total too costly to inline (cost=400, threshold=250) 
mpd_compare_total
inline
        
mpd_cmp_total will not be inlined into mpd_compare_total 
mpd_compare_total
2335
    _settriple(result, (c < 0), (c != 0), 0);
inline
    
_settriple can be inlined into mpd_compare_total with cost=185 (threshold=250) 
mpd_compare_total
inline
    
_settriple inlined into mpd_compare_total 
mpd_compare_total
2336
    return c;
2337
}
2338
2339
/* Compare the magnitude of the operands using a total order. */
2340
int
2341
mpd_cmp_total_mag(const mpd_t *a, const mpd_t *b)
2342
{
2343
    mpd_t aa, bb;
2344
2345
    _mpd_copy_shared(&aa, a);
inline
    
_mpd_copy_shared can be inlined into mpd_cmp_total_mag with cost=0 (threshold=487) 
mpd_cmp_total_mag
inline
    
_mpd_copy_shared inlined into mpd_cmp_total_mag 
mpd_cmp_total_mag
2346
    _mpd_copy_shared(&bb, b);
inline
    
_mpd_copy_shared can be inlined into mpd_cmp_total_mag with cost=0 (threshold=487) 
mpd_cmp_total_mag
inline
    
_mpd_copy_shared inlined into mpd_cmp_total_mag 
mpd_cmp_total_mag
2347
2348
    mpd_set_positive(&aa);
inline
    
mpd_set_positive should always be inlined (cost=always) 
mpd_cmp_total_mag
inline
    
mpd_set_positive inlined into mpd_cmp_total_mag 
mpd_cmp_total_mag
2349
    mpd_set_positive(&bb);
inline
    
mpd_set_positive should always be inlined (cost=always) 
mpd_cmp_total_mag
inline
    
mpd_set_positive inlined into mpd_cmp_total_mag 
mpd_cmp_total_mag
2350
2351
    return mpd_cmp_total(&aa, &bb);
inline
           
mpd_cmp_total too costly to inline (cost=400, threshold=250) 
mpd_cmp_total_mag
inline
           
mpd_cmp_total will not be inlined into mpd_cmp_total_mag 
mpd_cmp_total_mag
inline
           
mpd_cmp_total too costly to inline (cost=400, threshold=250) 
mpd_compare_total_mag
inline
           
mpd_cmp_total will not be inlined into mpd_compare_total_mag 
mpd_compare_total_mag
2352
}
2353
2354
/*
2355
 * Compare the magnitude of a and b according to a total order, convert the
2356
 * the usual integer result to a decimal and store it in 'result'.
2357
 * For convenience, the integer result of the comparison is returned.
2358
 */
2359
int
2360
mpd_compare_total_mag(mpd_t *result, const mpd_t *a, const mpd_t *b)
2361
{
2362
    int c;
2363
2364
    c = mpd_cmp_total_mag(a, b);
inline
        
mpd_cmp_total_mag can be inlined into mpd_compare_total_mag with cost=150 (threshold=375) 
mpd_compare_total_mag
inline
        
mpd_cmp_total_mag inlined into mpd_compare_total_mag 
mpd_compare_total_mag
2365
    _settriple(result, (c < 0), (c != 0), 0);
inline
    
_settriple can be inlined into mpd_compare_total_mag with cost=185 (threshold=250) 
mpd_compare_total_mag
inline
    
_settriple inlined into mpd_compare_total_mag 
mpd_compare_total_mag
2366
    return c;
2367
}
2368
2369
/* Determine an ordering for operands that are numerically equal. */
2370
static inline int
2371
_mpd_cmp_numequal(const mpd_t *a, const mpd_t *b)
2372
{
2373
    int sign_a, sign_b;
2374
    int c;
2375
2376
    sign_a = mpd_sign(a);
inline
             
mpd_sign should always be inlined (cost=always) 
_mpd_cmp_numequal
inline
             
mpd_sign inlined into _mpd_cmp_numequal 
_mpd_cmp_numequal
2377
    sign_b = mpd_sign(b);
inline
             
mpd_sign should always be inlined (cost=always) 
_mpd_cmp_numequal
inline
             
mpd_sign inlined into _mpd_cmp_numequal 
_mpd_cmp_numequal
2378
    if (sign_a != sign_b) {
2379
        c = sign_b - sign_a;
2380
    }
2381
    else {
2382
        c = (a->exp < b->exp) ? -1 : 1;
gvn
                
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax
gvn
                         
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax
gvn
                
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax_mag
gvn
                         
load of type i64 not eliminated because it is clobbered by call 
mpd_qmax_mag
gvn
                
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin
gvn
                         
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin
gvn
                
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin_mag
gvn
                         
load of type i64 not eliminated because it is clobbered by call 
mpd_qmin_mag
2383
        c *= mpd_arith_sign(a);
inline
             
mpd_arith_sign should always be inlined (cost=always) 
_mpd_cmp_numequal
inline
             
mpd_arith_sign inlined into _mpd_cmp_numequal 
_mpd_cmp_numequal
2384
    }
2385
2386
    return c;
2387
}
2388
2389
2390
/******************************************************************************/
2391
/*                         Shifting the coefficient                           */
2392
/******************************************************************************/
2393
2394
/*
2395
 * Shift the coefficient of the operand to the left, no check for specials.
2396
 * Both operands may be the same pointer. If the result length has to be
2397
 * increased, mpd_qresize() might fail with MPD_Malloc_error.
2398
 */
2399
int
2400
mpd_qshiftl(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status)
2401
{
2402
    mpd_ssize_t size;
2403
2404
    assert(!mpd_isspecial(a));
2405
    assert(n >= 0);
2406
2407
    if (mpd_iszerocoeff(a) || n == 0) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qshiftl
inline
        
mpd_iszerocoeff inlined into mpd_qshiftl 
mpd_qshiftl
2408
        return mpd_qcopy(result, a, status);
inline
               
mpd_qcopy can be inlined into mpd_qshiftl with cost=215 (threshold=250) 
mpd_qshiftl
inline
               
mpd_qcopy inlined into mpd_qshiftl 
mpd_qshiftl
2409
    }
2410
2411
    size = mpd_digits_to_size(a->digits+n);
inline
           
mpd_digits_to_size should always be inlined (cost=always) 
mpd_qshiftl
inline
           
mpd_digits_to_size inlined into mpd_qshiftl 
mpd_qshiftl
2412
    if (!mpd_qresize(result, size, status)) {
inline
         
mpd_qresize should always be inlined (cost=always) 
mpd_qshiftl
inline
         
mpd_qresize inlined into mpd_qshiftl 
mpd_qshiftl
2413
        return 0; /* result is NaN */
2414
    }
2415
2416
    _mpd_baseshiftl(result->data, a->data, size, a->len, n);
inline
    
_mpd_baseshiftl will not be inlined into mpd_qshiftl because its definition is unavailable 
mpd_qshiftl
gvn
                            
load of type i64* not eliminated because it is clobbered by call 
mpd_qshiftl
gvn
                                     
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qshiftl
gvn
                                     
load eliminated by PRE 
mpd_qshiftl
gvn
                                                    
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qshiftl
gvn
                                                    
load eliminated by PRE 
mpd_qshiftl
2417
2418
    mpd_copy_flags(result, a);
inline
    
mpd_copy_flags should always be inlined (cost=always) 
mpd_qshiftl
inline
    
mpd_copy_flags inlined into mpd_qshiftl 
mpd_qshiftl
2419
    result->exp = a->exp;
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftl
2420
    result->digits = a->digits+n;
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qshiftl
2421
    result->len = size;
2422
2423
    return 1;
2424
}
2425
2426
/* Determine the rounding indicator if all digits of the coefficient are shifted
2427
 * out of the picture. */
2428
static mpd_uint_t
2429
_mpd_get_rnd(const mpd_uint_t *data, mpd_ssize_t len, int use_msd)
2430
{
2431
    mpd_uint_t rnd = 0, rest = 0, word;
2432
2433
    word = data[len-1];
2434
    /* special treatment for the most significant digit if shift == digits */
2435
    if (use_msd) {
2436
        _mpd_divmod_pow10(&rnd, &rest, word, mpd_word_digits(word)-1);
inline
                                             
mpd_word_digits should always be inlined (cost=always) 
_mpd_get_rnd
inline
                                             
mpd_word_digits inlined into _mpd_get_rnd 
_mpd_get_rnd
inline
        
_mpd_divmod_pow10 too costly to inline (cost=385, threshold=325) 
_mpd_get_rnd
inline
        
_mpd_divmod_pow10 will not be inlined into _mpd_get_rnd 
_mpd_get_rnd
2437
        if (len > 1 && rest == 0) {
gvn
                       
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_get_rnd
2438
             rest = !_mpd_isallzero(data, len-1);
inline
                     
_mpd_isallzero can be inlined into _mpd_get_rnd with cost=-5 (threshold=325) 
_mpd_get_rnd
inline
                     
_mpd_isallzero inlined into _mpd_get_rnd 
_mpd_get_rnd
2439
        }
2440
    }
2441
    else {
2442
        rest = !_mpd_isallzero(data, len);
inline
                
_mpd_isallzero can be inlined into _mpd_get_rnd with cost=-5 (threshold=325) 
_mpd_get_rnd
inline
                
_mpd_isallzero inlined into _mpd_get_rnd 
_mpd_get_rnd
2443
    }
2444
2445
    return (rnd == 0 || rnd == 5) ? rnd + !!rest : rnd;
gvn
            
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_get_rnd
gvn
                                            
load of type i64 eliminated in favor of phi 
_mpd_get_rnd
2446
}
2447
2448
/*
2449
 * Same as mpd_qshiftr(), but 'result' is an mpd_t with a static coefficient.
2450
 * It is the caller's responsibility to ensure that the coefficient is big
2451
 * enough. The function cannot fail.
2452
 */
2453
static mpd_uint_t
2454
mpd_qsshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n)
2455
{
2456
    mpd_uint_t rnd;
2457
    mpd_ssize_t size;
2458
2459
    assert(!mpd_isspecial(a));
2460
    assert(n >= 0);
2461
2462
    if (mpd_iszerocoeff(a) || n == 0) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qsshiftr
inline
        
mpd_iszerocoeff inlined into mpd_qsshiftr 
mpd_qsshiftr
2463
        mpd_qcopy_static(result, a);
inline
        
mpd_qcopy_static can be inlined into mpd_qsshiftr with cost=-14945 (threshold=250) 
mpd_qsshiftr
inline
        
mpd_qcopy_static inlined into mpd_qsshiftr 
mpd_qsshiftr
2464
        return 0;
2465
    }
2466
2467
    if (n >= a->digits) {
gvn
                
load of type i64 eliminated in favor of load 
_mpd_qget_uint
2468
        rnd = _mpd_get_rnd(a->data, a->len, (n==a->digits));
inline
              
_mpd_get_rnd too costly to inline (cost=470, threshold=250) 
mpd_qsshiftr
inline
              
_mpd_get_rnd will not be inlined into mpd_qsshiftr 
mpd_qsshiftr
inline
              
_mpd_get_rnd too costly to inline (cost=470, threshold=250) 
_mpd_qget_uint
inline
              
_mpd_get_rnd will not be inlined into _mpd_qget_uint 
_mpd_qget_uint
2469
        mpd_zerocoeff(result);
inline
        
mpd_zerocoeff can be inlined into mpd_qsshiftr with cost=120 (threshold=250) 
mpd_qsshiftr
inline
        
mpd_zerocoeff inlined into mpd_qsshiftr 
mpd_qsshiftr
2470
    }
2471
    else {
2472
        result->digits = a->digits-n;
2473
        size = mpd_digits_to_size(result->digits);
inline
               
mpd_digits_to_size should always be inlined (cost=always) 
mpd_qsshiftr
inline
               
mpd_digits_to_size inlined into mpd_qsshiftr 
mpd_qsshiftr
2474
        rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
inline
              
_mpd_baseshiftr will not be inlined into mpd_qsshiftr because its definition is unavailable 
mpd_qsshiftr
gvn
                                               
load of type i64* eliminated in favor of load 
mpd_qsshiftr
gvn
                                                        
load of type i64 eliminated in favor of load 
mpd_qsshiftr
gvn
                                      
load of type i64* eliminated in favor of getelementptr 
_mpd_qget_uint
2475
        result->len = size;
2476
    }
2477
2478
    mpd_copy_flags(result, a);
inline
    
mpd_copy_flags should always be inlined (cost=always) 
mpd_qsshiftr
inline
    
mpd_copy_flags inlined into mpd_qsshiftr 
mpd_qsshiftr
2479
    result->exp = a->exp;
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qsshiftr
gvn
                     
load of type i64 not eliminated in favor of load because it is clobbered by store 
_mpd_qget_uint
2480
2481
    return rnd;
2482
}
2483
2484
/*
2485
 * Inplace shift of the coefficient to the right, no check for specials.
2486
 * Returns the rounding indicator for mpd_rnd_incr().
2487
 * The function cannot fail.
2488
 */
2489
mpd_uint_t
2490
mpd_qshiftr_inplace(mpd_t *result, mpd_ssize_t n)
2491
{
2492
    uint32_t dummy;
2493
    mpd_uint_t rnd;
2494
    mpd_ssize_t size;
2495
2496
    assert(!mpd_isspecial(result));
2497
    assert(n >= 0);
2498
2499
    if (mpd_iszerocoeff(result) || n == 0) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qshiftr_inplace
inline
        
mpd_iszerocoeff inlined into mpd_qshiftr_inplace 
mpd_qshiftr_inplace
2500
        return 0;
2501
    }
2502
2503
    if (n >= result->digits) {
2504
        rnd = _mpd_get_rnd(result->data, result->len, (n==result->digits));
inline
              
_mpd_get_rnd too costly to inline (cost=470, threshold=250) 
mpd_qshiftr_inplace
inline
              
_mpd_get_rnd will not be inlined into mpd_qshiftr_inplace 
mpd_qshiftr_inplace
2505
        mpd_zerocoeff(result);
inline
        
mpd_zerocoeff can be inlined into mpd_qshiftr_inplace with cost=120 (threshold=250) 
mpd_qshiftr_inplace
inline
        
mpd_zerocoeff inlined into mpd_qshiftr_inplace 
mpd_qshiftr_inplace
2506
    }
2507
    else {
2508
        rnd = _mpd_baseshiftr(result->data, result->data, result->len, n);
inline
              
_mpd_baseshiftr will not be inlined into mpd_qshiftr_inplace because its definition is unavailable 
mpd_qshiftr_inplace
2509
        result->digits -= n;
gvn
                       
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qshiftr_inplace
2510
        size = mpd_digits_to_size(result->digits);
inline
               
mpd_digits_to_size should always be inlined (cost=always) 
mpd_qshiftr_inplace
inline
               
mpd_digits_to_size inlined into mpd_qshiftr_inplace 
mpd_qshiftr_inplace
2511
        /* reducing the size cannot fail */
2512
        mpd_qresize(result, size, &dummy);
inline
        
mpd_qresize should always be inlined (cost=always) 
mpd_qshiftr_inplace
inline
        
mpd_qresize inlined into mpd_qshiftr_inplace 
mpd_qshiftr_inplace
2513
        result->len = size;
2514
    }
2515
2516
    return rnd;
2517
}
2518
2519
/*
2520
 * Shift the coefficient of the operand to the right, no check for specials.
2521
 * Both operands may be the same pointer. Returns the rounding indicator to
2522
 * be used by mpd_rnd_incr(). If the result length has to be increased,
2523
 * mpd_qcopy() or mpd_qresize() might fail with MPD_Malloc_error. In those
2524
 * cases, MPD_UINT_MAX is returned.
2525
 */
2526
mpd_uint_t
2527
mpd_qshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status)
2528
{
2529
    mpd_uint_t rnd;
2530
    mpd_ssize_t size;
2531
2532
    assert(!mpd_isspecial(a));
2533
    assert(n >= 0);
2534
2535
    if (mpd_iszerocoeff(a) || n == 0) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qshiftr
inline
        
mpd_iszerocoeff inlined into mpd_qshiftr 
mpd_qshiftr
2536
        if (!mpd_qcopy(result, a, status)) {
inline
             
mpd_qcopy can be inlined into mpd_qshiftr with cost=215 (threshold=250) 
mpd_qshiftr
inline
             
mpd_qcopy inlined into mpd_qshiftr 
mpd_qshiftr
2537
            return MPD_UINT_MAX;
2538
        }
2539
        return 0;
2540
    }
2541
2542
    if (n >= a->digits) {
2543
        rnd = _mpd_get_rnd(a->data, a->len, (n==a->digits));
inline
              
_mpd_get_rnd too costly to inline (cost=470, threshold=250) 
mpd_qshiftr
inline
              
_mpd_get_rnd will not be inlined into mpd_qshiftr 
mpd_qshiftr
2544
        mpd_zerocoeff(result);
inline
        
mpd_zerocoeff can be inlined into mpd_qshiftr with cost=120 (threshold=250) 
mpd_qshiftr
inline
        
mpd_zerocoeff inlined into mpd_qshiftr 
mpd_qshiftr
2545
    }
2546
    else {
2547
        result->digits = a->digits-n;
2548
        size = mpd_digits_to_size(result->digits);
inline
               
mpd_digits_to_size should always be inlined (cost=always) 
mpd_qshiftr
inline
               
mpd_digits_to_size inlined into mpd_qshiftr 
mpd_qshiftr
2549
        if (result == a) {
2550
            rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
inline
                  
_mpd_baseshiftr will not be inlined into mpd_qshiftr because its definition is unavailable 
mpd_qshiftr
gvn
                                          
load of type i64* eliminated in favor of load 
mpd_qshiftr
gvn
                                                   
load of type i64* eliminated in favor of load 
mpd_qshiftr
gvn
                                                            
load of type i64 eliminated in favor of load 
mpd_qshiftr
2551
            /* reducing the size cannot fail */
2552
            mpd_qresize(result, size, status);
inline
            
mpd_qresize should always be inlined (cost=always) 
mpd_qshiftr
inline
            
mpd_qresize inlined into mpd_qshiftr 
mpd_qshiftr
2553
        }
2554
        else {
2555
            if (!mpd_qresize(result, size, status)) {
inline
                 
mpd_qresize should always be inlined (cost=always) 
mpd_qshiftr
inline
                 
mpd_qresize inlined into mpd_qshiftr 
mpd_qshiftr
2556
                return MPD_UINT_MAX;
2557
            }
2558
            rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
inline
                  
_mpd_baseshiftr will not be inlined into mpd_qshiftr because its definition is unavailable 
mpd_qshiftr
gvn
                                          
load of type i64* not eliminated because it is clobbered by call 
mpd_qshiftr
gvn
                                                   
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qshiftr
gvn
                                                   
load eliminated by PRE 
mpd_qshiftr
gvn
                                                            
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qshiftr
gvn
                                                            
load eliminated by PRE 
mpd_qshiftr
2559
        }
2560
        result->len = size;
2561
    }
2562
2563
    mpd_copy_flags(result, a);
inline
    
mpd_copy_flags should always be inlined (cost=always) 
mpd_qshiftr
inline
    
mpd_copy_flags inlined into mpd_qshiftr 
mpd_qshiftr
2564
    result->exp = a->exp;
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qshiftr
2565
2566
    return rnd;
2567
}
2568
2569
2570
/******************************************************************************/
2571
/*                         Miscellaneous operations                           */
2572
/******************************************************************************/
2573
2574
/* Logical And */
2575
void
mpd_qand(mpd_t *result, const mpd_t *a, const mpd_t *b,
         const mpd_context_t *ctx, uint32_t *status)
{
    const mpd_t *big = a, *small = b;
    mpd_uint_t x, y, z, xbit, ybit;
    int k, mswdigits;
    mpd_ssize_t i;

    /* Logical operands must be finite, non-negative and have exponent 0;
     * anything else raises Invalid_operation. */
    if (mpd_isspecial(a) || mpd_isspecial(b) ||
        mpd_isnegative(a) || mpd_isnegative(b) ||
        a->exp != 0 || b->exp != 0) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    /* Ensure 'small' is the operand with fewer (or equal) digits. */
    if (b->digits > a->digits) {
        big = b;
        small = a;
    }
    if (!mpd_qresize(result, big->len, status)) {
        return;
    }

    /* full words */
    for (i = 0; i < small->len-1; i++) {
        x = small->data[i];
        y = big->data[i];
        z = 0;
        /* Extract one decimal digit per iteration; every digit of a
         * logical operand must be 0 or 1. */
        for (k = 0; k < MPD_RDIGITS; k++) {
            xbit = x % 10;
            x /= 10;
            ybit = y % 10;
            y /= 10;
            if (xbit > 1 || ybit > 1) {
                goto invalid_operation;
            }
            z += (xbit&ybit) ? mpd_pow10[k] : 0;
        }
        result->data[i] = z;
    }
    /* most significant word of small */
    x = small->data[i];
    y = big->data[i];
    z = 0;
    mswdigits = mpd_word_digits(x);
    for (k = 0; k < mswdigits; k++) {
        xbit = x % 10;
        x /= 10;
        ybit = y % 10;
        y /= 10;
        if (xbit > 1 || ybit > 1) {
            goto invalid_operation;
        }
        z += (xbit&ybit) ? mpd_pow10[k] : 0;
    }
    result->data[i++] = z;

    /* scan the rest of y for digits > 1 */
    /* (digits of 'big' above 'small' are AND'ed with implicit zeros, so
     * they only need to be validated, not stored) */
    for (; k < MPD_RDIGITS; k++) {
        ybit = y % 10;
        y /= 10;
        if (ybit > 1) {
            goto invalid_operation;
        }
    }
    /* scan the rest of big for digits > 1 */
    for (; i < big->len; i++) {
        y = big->data[i];
        for (k = 0; k < MPD_RDIGITS; k++) {
            ybit = y % 10;
            y /= 10;
            if (ybit > 1) {
                goto invalid_operation;
            }
        }
    }

    /* The result has at most small->len words. */
    mpd_clear_flags(result);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, small->len);
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
    _mpd_cap(result, ctx);
    return;

invalid_operation:
    mpd_seterror(result, MPD_Invalid_operation, status);
}
2664
2665
/* Class of an operand. Returns a pointer to the constant name. */
2666
const char *
2667
mpd_class(const mpd_t *a, const mpd_context_t *ctx)
2668
{
2669
    if (mpd_isnan(a)) {
inline
        
mpd_isnan should always be inlined (cost=always) 
mpd_class
inline
        
mpd_isnan inlined into mpd_class 
mpd_class
2670
        if (mpd_isqnan(a))
inline
            
mpd_isqnan should always be inlined (cost=always) 
mpd_class
inline
            
mpd_isqnan inlined into mpd_class 
mpd_class
2671
            return "NaN";
2672
        else
2673
            return "sNaN";
2674
    }
2675
    else if (mpd_ispositive(a)) {
inline
             
mpd_ispositive should always be inlined (cost=always) 
mpd_class
inline
             
mpd_ispositive inlined into mpd_class 
mpd_class
2676
        if (mpd_isinfinite(a))
inline
            
mpd_isinfinite should always be inlined (cost=always) 
mpd_class
inline
            
mpd_isinfinite inlined into mpd_class 
mpd_class
2677
            return "+Infinity";
2678
        else if (mpd_iszero(a))
inline
                 
mpd_iszero should always be inlined (cost=always) 
mpd_class
inline
                 
mpd_iszero inlined into mpd_class 
mpd_class
2679
            return "+Zero";
2680
        else if (mpd_isnormal(a, ctx))
inline
                 
mpd_isnormal can be inlined into mpd_class with cost=45 (threshold=325) 
mpd_class
inline
                 
mpd_isnormal inlined into mpd_class 
mpd_class
2681
            return "+Normal";
2682
        else
2683
            return "+Subnormal";
2684
    }
2685
    else {
2686
        if (mpd_isinfinite(a))
2687
            return "-Infinity";
2688
        else if (mpd_iszero(a))
inline
                 
mpd_iszero should always be inlined (cost=always) 
mpd_class
inline
                 
mpd_iszero inlined into mpd_class 
mpd_class
2689
            return "-Zero";
2690
        else if (mpd_isnormal(a, ctx))
inline
                 
mpd_isnormal can be inlined into mpd_class with cost=45 (threshold=325) 
mpd_class
inline
                 
mpd_isnormal inlined into mpd_class 
mpd_class
2691
            return "-Normal";
2692
        else
2693
            return "-Subnormal";
2694
    }
2695
}
2696
2697
/* Logical Invert */
2698
void
mpd_qinvert(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
            uint32_t *status)
{
    mpd_uint_t x, z, xbit;
    mpd_ssize_t i, digits, len;
    mpd_ssize_t q, r;
    int k;

    /* Logical operands must be finite, non-negative and have exponent 0. */
    if (mpd_isspecial(a) || mpd_isnegative(a) || a->exp != 0) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    /* The inversion spans the full context precision, so the result is
     * padded with inverted (implicit) zeros above a's digits. */
    digits = (a->digits < ctx->prec) ? ctx->prec : a->digits;
    _mpd_idiv_word(&q, &r, digits, MPD_RDIGITS);
    len = (r == 0) ? q : q+1;
    if (!mpd_qresize(result, len, status)) {
        return;
    }

    for (i = 0; i < len; i++) {
        x = (i < a->len) ? a->data[i] : 0;  /* words beyond a->len are zero */
        z = 0;
        for (k = 0; k < MPD_RDIGITS; k++) {
            xbit = x % 10;
            x /= 10;
            /* every digit of a logical operand must be 0 or 1 */
            if (xbit > 1) {
                goto invalid_operation;
            }
            z += !xbit ? mpd_pow10[k] : 0;  /* flip the digit */
        }
        result->data[i] = z;
    }

    mpd_clear_flags(result);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, len);
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
    _mpd_cap(result, ctx);
    return;

invalid_operation:
    mpd_seterror(result, MPD_Invalid_operation, status);
}
2744
2745
/* Exponent of the magnitude of the most significant digit of the operand. */
2746
void
2747
mpd_qlogb(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
2748
          uint32_t *status)
2749
{
2750
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qlogb
inline
        
mpd_isspecial inlined into mpd_qlogb 
mpd_qlogb
2751
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qlogb
inline
            
mpd_qcheck_nan will not be inlined into mpd_qlogb 
mpd_qlogb
2752
            return;
2753
        }
2754
        mpd_setspecial(result, MPD_POS, MPD_INF);
inline
        
mpd_setspecial can be inlined into mpd_qlogb with cost=115 (threshold=250) 
mpd_qlogb
inline
        
mpd_setspecial inlined into mpd_qlogb 
mpd_qlogb
2755
    }
2756
    else if (mpd_iszerocoeff(a)) {
inline
             
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qlogb
inline
             
mpd_iszerocoeff inlined into mpd_qlogb 
mpd_qlogb
2757
        mpd_setspecial(result, MPD_NEG, MPD_INF);
inline
        
mpd_setspecial can be inlined into mpd_qlogb with cost=115 (threshold=250) 
mpd_qlogb
inline
        
mpd_setspecial inlined into mpd_qlogb 
mpd_qlogb
2758
        *status |= MPD_Division_by_zero;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qlogb
2759
    }
2760
    else {
2761
        mpd_qset_ssize(result, mpd_adjexp(a), ctx, status);
inline
        
mpd_qset_ssize can be inlined into mpd_qlogb with cost=200 (threshold=250) 
mpd_qlogb
inline
        
mpd_qset_ssize inlined into mpd_qlogb 
mpd_qlogb
inline
                               
mpd_adjexp should always be inlined (cost=always) 
mpd_qlogb
inline
                               
mpd_adjexp inlined into mpd_qlogb 
mpd_qlogb
2762
    }
2763
}
2764
2765
/* Logical Or */
2766
void
mpd_qor(mpd_t *result, const mpd_t *a, const mpd_t *b,
        const mpd_context_t *ctx, uint32_t *status)
{
    const mpd_t *big = a, *small = b;
    mpd_uint_t x, y, z, xbit, ybit;
    int k, mswdigits;
    mpd_ssize_t i;

    /* Logical operands must be finite, non-negative and have exponent 0;
     * anything else raises Invalid_operation. */
    if (mpd_isspecial(a) || mpd_isspecial(b) ||
        mpd_isnegative(a) || mpd_isnegative(b) ||
        a->exp != 0 || b->exp != 0) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    /* Ensure 'small' is the operand with fewer (or equal) digits. */
    if (b->digits > a->digits) {
        big = b;
        small = a;
    }
    if (!mpd_qresize(result, big->len, status)) {
        return;
    }

    /* full words */
    for (i = 0; i < small->len-1; i++) {
        x = small->data[i];
        y = big->data[i];
        z = 0;
        /* Extract one decimal digit per iteration; every digit of a
         * logical operand must be 0 or 1. */
        for (k = 0; k < MPD_RDIGITS; k++) {
            xbit = x % 10;
            x /= 10;
            ybit = y % 10;
            y /= 10;
            if (xbit > 1 || ybit > 1) {
                goto invalid_operation;
            }
            z += (xbit|ybit) ? mpd_pow10[k] : 0;
        }
        result->data[i] = z;
    }
    /* most significant word of small */
    x = small->data[i];
    y = big->data[i];
    z = 0;
    mswdigits = mpd_word_digits(x);
    for (k = 0; k < mswdigits; k++) {
        xbit = x % 10;
        x /= 10;
        ybit = y % 10;
        y /= 10;
        if (xbit > 1 || ybit > 1) {
            goto invalid_operation;
        }
        z += (xbit|ybit) ? mpd_pow10[k] : 0;
    }

    /* scan for digits > 1 and copy the rest of y */
    /* (unlike AND, digits of 'big' above 'small' are OR'ed with implicit
     * zeros, so they are copied into the result) */
    for (; k < MPD_RDIGITS; k++) {
        ybit = y % 10;
        y /= 10;
        if (ybit > 1) {
            goto invalid_operation;
        }
        z += ybit*mpd_pow10[k];
    }
    result->data[i++] = z;
    /* scan for digits > 1 and copy the rest of big */
    for (; i < big->len; i++) {
        y = big->data[i];
        for (k = 0; k < MPD_RDIGITS; k++) {
            ybit = y % 10;
            y /= 10;
            if (ybit > 1) {
                goto invalid_operation;
            }
        }
        result->data[i] = big->data[i];
    }

    /* The result has at most big->len words. */
    mpd_clear_flags(result);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, big->len);
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
    _mpd_cap(result, ctx);
    return;

invalid_operation:
    mpd_seterror(result, MPD_Invalid_operation, status);
}
2857
2858
/*
2859
 * Rotate the coefficient of 'a' by 'b' digits. 'b' must be an integer with
2860
 * exponent 0.
2861
 */
2862
void
2863
mpd_qrotate(mpd_t *result, const mpd_t *a, const mpd_t *b,
2864
            const mpd_context_t *ctx, uint32_t *status)
2865
{
2866
    uint32_t workstatus = 0;
2867
    MPD_NEW_STATIC(tmp,0,0,0,0);
2868
    MPD_NEW_STATIC(big,0,0,0,0);
2869
    MPD_NEW_STATIC(small,0,0,0,0);
2870
    mpd_ssize_t n, lshift, rshift;
2871
2872
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qrotate
inline
        
mpd_isspecial inlined into mpd_qrotate 
mpd_qrotate
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qrotate
inline
                            
mpd_isspecial inlined into mpd_qrotate 
mpd_qrotate
2873
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qrotate
inline
            
mpd_qcheck_nans will not be inlined into mpd_qrotate 
mpd_qrotate
2874
            return;
2875
        }
2876
    }
2877
    if (b->exp != 0 || mpd_isinfinite(b)) {
inline
                       
mpd_isinfinite should always be inlined (cost=always) 
mpd_qrotate
inline
                       
mpd_isinfinite inlined into mpd_qrotate 
mpd_qrotate
gvn
           
load of type i64 not eliminated because it is clobbered by call 
mpd_qrotate
2878
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qrotate with cost=130 (threshold=250) 
mpd_qrotate
inline
        
mpd_seterror inlined into mpd_qrotate 
mpd_qrotate
2879
        return;
2880
    }
2881
2882
    n = mpd_qget_ssize(b, &workstatus);
inline
        
mpd_qget_ssize can be inlined into mpd_qrotate with cost=85 (threshold=250) 
mpd_qrotate
inline
        
mpd_qget_ssize inlined into mpd_qrotate 
mpd_qrotate
2883
    if (workstatus&MPD_Invalid_operation) {
gvn
        
load of type i32 eliminated in favor of phi 
mpd_qrotate
2884
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qrotate with cost=130 (threshold=250) 
mpd_qrotate
inline
        
mpd_seterror inlined into mpd_qrotate 
mpd_qrotate
2885
        return;
2886
    }
2887
    if (n > ctx->prec || n < -ctx->prec) {
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
mpd_qrotate
2888
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qrotate with cost=130 (threshold=250) 
mpd_qrotate
inline
        
mpd_seterror inlined into mpd_qrotate 
mpd_qrotate
2889
        return;
2890
    }
2891
    if (mpd_isinfinite(a)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
mpd_qrotate
inline
        
mpd_isinfinite inlined into mpd_qrotate 
mpd_qrotate
2892
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into mpd_qrotate with cost=215 (threshold=250) 
mpd_qrotate
inline
        
mpd_qcopy inlined into mpd_qrotate 
mpd_qrotate
2893
        return;
2894
    }
2895
2896
    if (n >= 0) {
2897
        lshift = n;
2898
        rshift = ctx->prec-n;
2899
    }
2900
    else {
2901
        lshift = ctx->prec+n;
2902
        rshift = -n;
2903
    }
2904
2905
    if (a->digits > ctx->prec) {
gvn
           
load of type i64 not eliminated because it is clobbered by call 
mpd_qrotate
2906
        if (!mpd_qcopy(&tmp, a, status)) {
inline
             
mpd_qcopy can be inlined into mpd_qrotate with cost=215 (threshold=250) 
mpd_qrotate
inline
             
mpd_qcopy inlined into mpd_qrotate 
mpd_qrotate
2907
            mpd_seterror(result, MPD_Malloc_error, status);
inline
            
mpd_seterror can be inlined into mpd_qrotate with cost=130 (threshold=250) 
mpd_qrotate
inline
            
mpd_seterror inlined into mpd_qrotate 
mpd_qrotate
2908
            goto finish;
2909
        }
2910
        _mpd_cap(&tmp, ctx);
inline
        
_mpd_cap too costly to inline (cost=630, threshold=625) 
mpd_qrotate
inline
        
_mpd_cap will not be inlined into mpd_qrotate 
mpd_qrotate
2911
        a = &tmp;
2912
    }
2913
2914
    if (!mpd_qshiftl(&big, a, lshift, status)) {
inline
         
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
mpd_qrotate
inline
         
mpd_qshiftl will not be inlined into mpd_qrotate 
mpd_qrotate
2915
        mpd_seterror(result, MPD_Malloc_error, status);
inline
        
mpd_seterror can be inlined into mpd_qrotate with cost=130 (threshold=250) 
mpd_qrotate
inline
        
mpd_seterror inlined into mpd_qrotate 
mpd_qrotate
2916
        goto finish;
2917
    }
2918
    _mpd_cap(&big, ctx);
inline
    
_mpd_cap too costly to inline (cost=630, threshold=625) 
mpd_qrotate
inline
    
_mpd_cap will not be inlined into mpd_qrotate 
mpd_qrotate
2919
2920
    if (mpd_qshiftr(&small, a, rshift, status) == MPD_UINT_MAX) {
inline
        
mpd_qshiftr too costly to inline (cost=630, threshold=625) 
mpd_qrotate
inline
        
mpd_qshiftr will not be inlined into mpd_qrotate 
mpd_qrotate
2921
        mpd_seterror(result, MPD_Malloc_error, status);
inline
        
mpd_seterror can be inlined into mpd_qrotate with cost=130 (threshold=250) 
mpd_qrotate
inline
        
mpd_seterror inlined into mpd_qrotate 
mpd_qrotate
2922
        goto finish;
2923
    }
2924
    _mpd_qadd(result, &big, &small, ctx, status);
inline
    
_mpd_qadd can be inlined into mpd_qrotate with cost=15 (threshold=375) 
mpd_qrotate
inline
    
_mpd_qadd inlined into mpd_qrotate 
mpd_qrotate
2925
2926
2927
finish:
2928
    mpd_del(&tmp);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qrotate
inline
    
mpd_del inlined into mpd_qrotate 
mpd_qrotate
2929
    mpd_del(&big);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qrotate
inline
    
mpd_del inlined into mpd_qrotate 
mpd_qrotate
2930
    mpd_del(&small);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qrotate
inline
    
mpd_del inlined into mpd_qrotate 
mpd_qrotate
2931
}
2932
2933
/*
2934
 * b must be an integer with exponent 0 and in the range +-2*(emax + prec).
2935
 * XXX: In my opinion +-(2*emax + prec) would be more sensible.
2936
 * The result is a with the value of b added to its exponent.
2937
 */
2938
void
2939
mpd_qscaleb(mpd_t *result, const mpd_t *a, const mpd_t *b,
2940
            const mpd_context_t *ctx, uint32_t *status)
2941
{
2942
    uint32_t workstatus = 0;
2943
    mpd_uint_t n, maxjump;
2944
#ifndef LEGACY_COMPILER
2945
    int64_t exp;
2946
#else
2947
    mpd_uint_t x;
2948
    int x_sign, n_sign;
2949
    mpd_ssize_t exp;
2950
#endif
2951
2952
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qscaleb
inline
        
mpd_isspecial inlined into mpd_qscaleb 
mpd_qscaleb
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qscaleb
inline
                            
mpd_isspecial inlined into mpd_qscaleb 
mpd_qscaleb
2953
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qscaleb
inline
            
mpd_qcheck_nans will not be inlined into mpd_qscaleb 
mpd_qscaleb
2954
            return;
2955
        }
2956
    }
2957
    if (b->exp != 0 || mpd_isinfinite(b)) {
inline
                       
mpd_isinfinite should always be inlined (cost=always) 
mpd_qscaleb
inline
                       
mpd_isinfinite inlined into mpd_qscaleb 
mpd_qscaleb
gvn
           
load of type i64 not eliminated because it is clobbered by call 
mpd_qscaleb
2958
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qscaleb with cost=130 (threshold=250) 
mpd_qscaleb
inline
        
mpd_seterror inlined into mpd_qscaleb 
mpd_qscaleb
2959
        return;
2960
    }
2961
2962
    n = mpd_qabs_uint(b, &workstatus);
inline
        
mpd_qabs_uint can be inlined into mpd_qscaleb with cost=5 (threshold=375) 
mpd_qscaleb
inline
        
mpd_qabs_uint inlined into mpd_qscaleb 
mpd_qscaleb
2963
    /* the spec demands this */
2964
    maxjump = 2 * (mpd_uint_t)(ctx->emax + ctx->prec);
gvn
                                    
load of type i64 not eliminated because it is clobbered by call 
mpd_qscaleb
gvn
                                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qscaleb
2965
2966
    if (n > maxjump || workstatus&MPD_Invalid_operation) {
gvn
                       
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qscaleb
2967
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qscaleb with cost=130 (threshold=250) 
mpd_qscaleb
inline
        
mpd_seterror inlined into mpd_qscaleb 
mpd_qscaleb
2968
        return;
2969
    }
2970
    if (mpd_isinfinite(a)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
mpd_qscaleb
inline
        
mpd_isinfinite inlined into mpd_qscaleb 
mpd_qscaleb
2971
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into mpd_qscaleb with cost=215 (threshold=250) 
mpd_qscaleb
inline
        
mpd_qcopy inlined into mpd_qscaleb 
mpd_qscaleb
2972
        return;
2973
    }
2974
2975
#ifndef LEGACY_COMPILER
2976
    exp = a->exp + (int64_t)n * mpd_arith_sign(b);
inline
                                
mpd_arith_sign should always be inlined (cost=always) 
mpd_qscaleb
inline
                                
mpd_arith_sign inlined into mpd_qscaleb 
mpd_qscaleb
gvn
             
load of type i64 not eliminated because it is clobbered by call 
mpd_qscaleb
2977
    exp = (exp > MPD_EXP_INF) ? MPD_EXP_INF : exp;
2978
    exp = (exp < MPD_EXP_CLAMP) ? MPD_EXP_CLAMP : exp;
2979
#else
2980
    x = (a->exp < 0) ? -a->exp : a->exp;
2981
    x_sign = (a->exp < 0) ? 1 : 0;
2982
    n_sign = mpd_isnegative(b) ? 1 : 0;
2983
2984
    if (x_sign == n_sign) {
2985
        x = x + n;
2986
        if (x < n) x = MPD_UINT_MAX;
2987
    }
2988
    else {
2989
        x_sign = (x >= n) ? x_sign : n_sign;
2990
        x = (x >= n) ? x - n : n - x;
2991
    }
2992
    if (!x_sign && x > MPD_EXP_INF) x = MPD_EXP_INF;
2993
    if (x_sign && x > -MPD_EXP_CLAMP) x = -MPD_EXP_CLAMP;
2994
    exp = x_sign ? -((mpd_ssize_t)x) : (mpd_ssize_t)x;
2995
#endif
2996
2997
    mpd_qcopy(result, a, status);
inline
    
mpd_qcopy can be inlined into mpd_qscaleb with cost=215 (threshold=250) 
mpd_qscaleb
inline
    
mpd_qcopy inlined into mpd_qscaleb 
mpd_qscaleb
2998
    result->exp = (mpd_ssize_t)exp;
2999
3000
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qscaleb
inline
    
mpd_qfinalize will not be inlined into mpd_qscaleb 
mpd_qscaleb
3001
}
3002
3003
/*
3004
 * Shift the coefficient by n digits, positive n is a left shift. In the case
3005
 * of a left shift, the result is decapitated to fit the context precision. If
3006
 * you don't want that, use mpd_shiftl().
3007
 */
3008
void
3009
mpd_qshiftn(mpd_t *result, const mpd_t *a, mpd_ssize_t n, const mpd_context_t *ctx,
3010
            uint32_t *status)
3011
{
3012
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qshiftn
inline
        
mpd_isspecial inlined into mpd_qshiftn 
mpd_qshiftn
3013
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qshiftn
inline
            
mpd_qcheck_nan will not be inlined into mpd_qshiftn 
mpd_qshiftn
3014
            return;
3015
        }
3016
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into mpd_qshiftn with cost=215 (threshold=250) 
mpd_qshiftn
inline
        
mpd_qcopy inlined into mpd_qshiftn 
mpd_qshiftn
3017
        return;
3018
    }
3019
3020
    if (n >= 0 && n <= ctx->prec) {
3021
        mpd_qshiftl(result, a, n, status);
inline
        
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
mpd_qshiftn
inline
        
mpd_qshiftl will not be inlined into mpd_qshiftn 
mpd_qshiftn
3022
        _mpd_cap(result, ctx);
inline
        
_mpd_cap too costly to inline (cost=630, threshold=625) 
mpd_qshiftn
inline
        
_mpd_cap will not be inlined into mpd_qshiftn 
mpd_qshiftn
3023
    }
3024
    else if (n < 0 && n >= -ctx->prec) {
3025
        if (!mpd_qcopy(result, a, status)) {
inline
             
mpd_qcopy can be inlined into mpd_qshiftn with cost=215 (threshold=250) 
mpd_qshiftn
inline
             
mpd_qcopy inlined into mpd_qshiftn 
mpd_qshiftn
3026
            return;
3027
        }
3028
        _mpd_cap(result, ctx);
inline
        
_mpd_cap too costly to inline (cost=630, threshold=625) 
mpd_qshiftn
inline
        
_mpd_cap will not be inlined into mpd_qshiftn 
mpd_qshiftn
3029
        mpd_qshiftr_inplace(result, -n);
inline
        
mpd_qshiftr_inplace too costly to inline (cost=475, threshold=250) 
mpd_qshiftn
inline
        
mpd_qshiftr_inplace will not be inlined into mpd_qshiftn 
mpd_qshiftn
3030
    }
3031
    else {
3032
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qshiftn with cost=130 (threshold=250) 
mpd_qshiftn
inline
        
mpd_seterror inlined into mpd_qshiftn 
mpd_qshiftn
3033
    }
3034
}
3035
3036
/*
3037
 * Same as mpd_shiftn(), but the shift is specified by the decimal b, which
3038
 * must be an integer with a zero exponent. Infinities remain infinities.
3039
 */
3040
void
3041
mpd_qshift(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx,
3042
           uint32_t *status)
3043
{
3044
    uint32_t workstatus = 0;
3045
    mpd_ssize_t n;
3046
3047
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qshift
inline
        
mpd_isspecial inlined into mpd_qshift 
mpd_qshift
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qshift
inline
                            
mpd_isspecial inlined into mpd_qshift 
mpd_qshift
3048
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qshift
inline
            
mpd_qcheck_nans will not be inlined into mpd_qshift 
mpd_qshift
3049
            return;
3050
        }
3051
    }
3052
    if (b->exp != 0 || mpd_isinfinite(b)) {
inline
                       
mpd_isinfinite should always be inlined (cost=always) 
mpd_qshift
inline
                       
mpd_isinfinite inlined into mpd_qshift 
mpd_qshift
gvn
           
load of type i64 not eliminated because it is clobbered by call 
mpd_qshift
3053
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qshift with cost=130 (threshold=250) 
mpd_qshift
inline
        
mpd_seterror inlined into mpd_qshift 
mpd_qshift
3054
        return;
3055
    }
3056
3057
    n = mpd_qget_ssize(b, &workstatus);
inline
        
mpd_qget_ssize can be inlined into mpd_qshift with cost=85 (threshold=250) 
mpd_qshift
inline
        
mpd_qget_ssize inlined into mpd_qshift 
mpd_qshift
3058
    if (workstatus&MPD_Invalid_operation) {
gvn
        
load of type i32 eliminated in favor of phi 
mpd_qshift
3059
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qshift with cost=130 (threshold=250) 
mpd_qshift
inline
        
mpd_seterror inlined into mpd_qshift 
mpd_qshift
3060
        return;
3061
    }
3062
    if (n > ctx->prec || n < -ctx->prec) {
gvn
                 
load of type i64 not eliminated because it is clobbered by call 
mpd_qshift
3063
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qshift with cost=130 (threshold=250) 
mpd_qshift
inline
        
mpd_seterror inlined into mpd_qshift 
mpd_qshift
3064
        return;
3065
    }
3066
    if (mpd_isinfinite(a)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
mpd_qshift
inline
        
mpd_isinfinite inlined into mpd_qshift 
mpd_qshift
3067
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into mpd_qshift with cost=215 (threshold=250) 
mpd_qshift
inline
        
mpd_qcopy inlined into mpd_qshift 
mpd_qshift
3068
        return;
3069
    }
3070
3071
    if (n >= 0) {
3072
        mpd_qshiftl(result, a, n, status);
inline
        
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
mpd_qshift
inline
        
mpd_qshiftl will not be inlined into mpd_qshift 
mpd_qshift
3073
        _mpd_cap(result, ctx);
inline
        
_mpd_cap too costly to inline (cost=630, threshold=625) 
mpd_qshift
inline
        
_mpd_cap will not be inlined into mpd_qshift 
mpd_qshift
3074
    }
3075
    else {
3076
        if (!mpd_qcopy(result, a, status)) {
inline
             
mpd_qcopy can be inlined into mpd_qshift with cost=215 (threshold=250) 
mpd_qshift
inline
             
mpd_qcopy inlined into mpd_qshift 
mpd_qshift
3077
            return;
3078
        }
3079
        _mpd_cap(result, ctx);
inline
        
_mpd_cap too costly to inline (cost=630, threshold=625) 
mpd_qshift
inline
        
_mpd_cap will not be inlined into mpd_qshift 
mpd_qshift
3080
        mpd_qshiftr_inplace(result, -n);
inline
        
mpd_qshiftr_inplace too costly to inline (cost=475, threshold=250) 
mpd_qshift
inline
        
mpd_qshiftr_inplace will not be inlined into mpd_qshift 
mpd_qshift
3081
    }
3082
}
3083
3084
/* Logical Xor */
3085
void
3086
mpd_qxor(mpd_t *result, const mpd_t *a, const mpd_t *b,
3087
        const mpd_context_t *ctx, uint32_t *status)
3088
{
3089
    const mpd_t *big = a, *small = b;
3090
    mpd_uint_t x, y, z, xbit, ybit;
3091
    int k, mswdigits;
3092
    mpd_ssize_t i;
3093
3094
    if (mpd_isspecial(a) || mpd_isspecial(b) ||
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qxor
inline
        
mpd_isspecial inlined into mpd_qxor 
mpd_qxor
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qxor
inline
                            
mpd_isspecial inlined into mpd_qxor 
mpd_qxor
3095
        mpd_isnegative(a) || mpd_isnegative(b) ||
inline
        
mpd_isnegative should always be inlined (cost=always) 
mpd_qxor
inline
        
mpd_isnegative inlined into mpd_qxor 
mpd_qxor
inline
                             
mpd_isnegative should always be inlined (cost=always) 
mpd_qxor
inline
                             
mpd_isnegative inlined into mpd_qxor 
mpd_qxor
3096
        a->exp != 0 || b->exp != 0) {
3097
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qxor with cost=130 (threshold=250) 
mpd_qxor
inline
        
mpd_seterror inlined into mpd_qxor 
mpd_qxor
3098
        return;
3099
    }
3100
    if (b->digits > a->digits) {
3101
        big = b;
3102
        small = a;
3103
    }
3104
    if (!mpd_qresize(result, big->len, status)) {
inline
         
mpd_qresize should always be inlined (cost=always) 
mpd_qxor
inline
         
mpd_qresize inlined into mpd_qxor 
mpd_qxor
3105
        return;
3106
    }
3107
3108
3109
    /* full words */
3110
    for (i = 0; i < small->len-1; i++) {
licm
                           
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qxor
gvn
                           
load of type i64 not eliminated because it is clobbered by call 
mpd_qxor
3111
        x = small->data[i];
3112
        y = big->data[i];
3113
        z = 0;
3114
        for (k = 0; k < MPD_RDIGITS; k++) {
loop-vectorize
        
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qxor
loop-vectorize
        
loop not vectorized 
mpd_qxor
3115
            xbit = x % 10;
3116
            x /= 10;
3117
            ybit = y % 10;
3118
            y /= 10;
3119
            if (xbit > 1 || ybit > 1) {
3120
                goto invalid_operation;
3121
            }
3122
            z += (xbit^ybit) ? mpd_pow10[k] : 0;
3123
        }
3124
        result->data[i] = z;
licm
                
hosting getelementptr 
mpd_qxor
licm
                
failed to hoist load with loop-invariant address because load is conditionally executed 
mpd_qxor
gvn
                
load of type i64* not eliminated because it is clobbered by call 
mpd_qxor
3125
    }
3126
    /* most significant word of small */
3127
    x = small->data[i];
3128
    y = big->data[i];
3129
    z = 0;
3130
    mswdigits = mpd_word_digits(x);
inline
                
mpd_word_digits should always be inlined (cost=always) 
mpd_qxor
inline
                
mpd_word_digits inlined into mpd_qxor 
mpd_qxor
3131
    for (k = 0; k < mswdigits; k++) {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qxor
loop-vectorize
    
loop not vectorized 
mpd_qxor
3132
        xbit = x % 10;
3133
        x /= 10;
3134
        ybit = y % 10;
3135
        y /= 10;
3136
        if (xbit > 1 || ybit > 1) {
3137
            goto invalid_operation;
3138
        }
3139
        z += (xbit^ybit) ? mpd_pow10[k] : 0;
3140
    }
3141
3142
    /* scan for digits > 1 and copy the rest of y */
3143
    for (; k < MPD_RDIGITS; k++) {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qxor
loop-vectorize
    
loop not vectorized 
mpd_qxor
3144
        ybit = y % 10;
3145
        y /= 10;
3146
        if (ybit > 1) {
3147
            goto invalid_operation;
3148
        }
3149
        z += ybit*mpd_pow10[k];
3150
    }
3151
    result->data[i++] = z;
3152
    /* scan for digits > 1 and copy the rest of big */
3153
    for (; i < big->len; i++) {
licm
                    
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qxor
gvn
                    
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qxor
3154
        y = big->data[i];
licm
                 
hosting load 
mpd_qxor
3155
        for (k = 0; k < MPD_RDIGITS; k++) {
loop-vectorize
        
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qxor
loop-vectorize
        
loop not vectorized 
mpd_qxor
loop-unroll
        
unrolled loop by a factor of 2 with a breakout at trip 1 
mpd_qxor
3156
            ybit = y % 10;
3157
            y /= 10;
3158
            if (ybit > 1) {
3159
                goto invalid_operation;
3160
            }
3161
        }
3162
        result->data[i] = big->data[i];
3163
    }
3164
3165
    mpd_clear_flags(result);
inline
    
mpd_clear_flags should always be inlined (cost=always) 
mpd_qxor
inline
    
mpd_clear_flags inlined into mpd_qxor 
mpd_qxor
3166
    result->exp = 0;
3167
    result->len = _mpd_real_size(result->data, big->len);
inline
                  
_mpd_real_size can be inlined into mpd_qxor with cost=-5 (threshold=325) 
mpd_qxor
inline
                  
_mpd_real_size inlined into mpd_qxor 
mpd_qxor
gvn
                                         
load of type i64* eliminated in favor of load 
mpd_qxor
gvn
                                                    
load of type i64 eliminated in favor of phi 
mpd_qxor
3168
    mpd_qresize(result, result->len, status);
inline
    
mpd_qresize should always be inlined (cost=always) 
mpd_qxor
inline
    
mpd_qresize inlined into mpd_qxor 
mpd_qxor
3169
    mpd_setdigits(result);
inline
    
mpd_setdigits can be inlined into mpd_qxor with cost=295 (threshold=325) 
mpd_qxor
inline
    
mpd_setdigits inlined into mpd_qxor 
mpd_qxor
3170
    _mpd_cap(result, ctx);
inline
    
_mpd_cap too costly to inline (cost=630, threshold=625) 
mpd_qxor
inline
    
_mpd_cap will not be inlined into mpd_qxor 
mpd_qxor
3171
    return;
3172
3173
invalid_operation:
3174
    mpd_seterror(result, MPD_Invalid_operation, status);
inline
    
mpd_seterror can be inlined into mpd_qxor with cost=130 (threshold=250) 
mpd_qxor
inline
    
mpd_seterror inlined into mpd_qxor 
mpd_qxor
3175
}
3176
3177
3178
/******************************************************************************/
3179
/*                         Arithmetic operations                              */
3180
/******************************************************************************/
3181
3182
/*
3183
 * The absolute value of a. If a is negative, the result is the same
3184
 * as the result of the minus operation. Otherwise, the result is the
3185
 * result of the plus operation.
3186
 */
3187
void
3188
mpd_qabs(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
3189
         uint32_t *status)
3190
{
3191
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qabs
inline
        
mpd_isspecial inlined into mpd_qabs 
mpd_qabs
3192
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qabs
inline
            
mpd_qcheck_nan will not be inlined into mpd_qabs 
mpd_qabs
3193
            return;
3194
        }
3195
    }
3196
3197
    if (mpd_isnegative(a)) {
inline
        
mpd_isnegative should always be inlined (cost=always) 
mpd_qabs
inline
        
mpd_isnegative inlined into mpd_qabs 
mpd_qabs
3198
        mpd_qminus(result, a, ctx, status);
inline
        
mpd_qminus too costly to inline (cost=630, threshold=625) 
mpd_qabs
inline
        
mpd_qminus will not be inlined into mpd_qabs 
mpd_qabs
3199
    }
3200
    else {
3201
        mpd_qplus(result, a, ctx, status);
inline
        
mpd_qplus too costly to inline (cost=660, threshold=625) 
mpd_qabs
inline
        
mpd_qplus will not be inlined into mpd_qabs 
mpd_qabs
3202
    }
3203
}
3204
3205
static inline void
3206
_mpd_ptrswap(const mpd_t **a, const mpd_t **b)
3207
{
3208
    const mpd_t *t = *a;
3209
    *a = *b;
3210
    *b = t;
3211
}
3212
3213
/* Add or subtract infinities. */
3214
static void
3215
_mpd_qaddsub_inf(mpd_t *result, const mpd_t *a, const mpd_t *b, uint8_t sign_b,
3216
                 uint32_t *status)
3217
{
3218
    if (mpd_isinfinite(a)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
_mpd_qaddsub_inf
inline
        
mpd_isinfinite inlined into _mpd_qaddsub_inf 
_mpd_qaddsub_inf
3219
        if (mpd_sign(a) != sign_b && mpd_isinfinite(b)) {
inline
            
mpd_sign should always be inlined (cost=always) 
_mpd_qaddsub_inf
inline
            
mpd_sign inlined into _mpd_qaddsub_inf 
_mpd_qaddsub_inf
inline
                                     
mpd_isinfinite should always be inlined (cost=always) 
_mpd_qaddsub_inf
inline
                                     
mpd_isinfinite inlined into _mpd_qaddsub_inf 
_mpd_qaddsub_inf
3220
            mpd_seterror(result, MPD_Invalid_operation, status);
inline
            
Not inlining. Cost of inlining mpd_seterror increases the cost of inlining _mpd_qaddsub_inf in other contexts 
_mpd_qaddsub_inf
inline
            
mpd_seterror will not be inlined into _mpd_qaddsub_inf 
_mpd_qaddsub_inf
inline
            
mpd_seterror can be inlined into mpd_qadd with cost=130 (threshold=250) 
mpd_qadd
inline
            
mpd_seterror inlined into mpd_qadd 
mpd_qadd
inline
            
mpd_seterror can be inlined into mpd_qsub with cost=130 (threshold=250) 
mpd_qsub
inline
            
mpd_seterror inlined into mpd_qsub 
mpd_qsub
3221
        }
3222
        else {
3223
            mpd_setspecial(result, mpd_sign(a), MPD_INF);
inline
            
Not inlining. Cost of inlining mpd_setspecial increases the cost of inlining _mpd_qaddsub_inf in other contexts 
_mpd_qaddsub_inf
inline
            
mpd_setspecial will not be inlined into _mpd_qaddsub_inf 
_mpd_qaddsub_inf
inline
                                   
mpd_sign should always be inlined (cost=always) 
_mpd_qaddsub_inf
inline
                                   
mpd_sign inlined into _mpd_qaddsub_inf 
_mpd_qaddsub_inf
inline
            
mpd_setspecial can be inlined into mpd_qadd with cost=120 (threshold=250) 
mpd_qadd
inline
            
mpd_setspecial inlined into mpd_qadd 
mpd_qadd
inline
            
mpd_setspecial can be inlined into mpd_qsub with cost=120 (threshold=250) 
mpd_qsub
inline
            
mpd_setspecial inlined into mpd_qsub 
mpd_qsub
3224
        }
3225
        return;
3226
    }
3227
    assert(mpd_isinfinite(b));
3228
    mpd_setspecial(result, sign_b, MPD_INF);
inline
    
Not inlining. Cost of inlining mpd_setspecial increases the cost of inlining _mpd_qaddsub_inf in other contexts 
_mpd_qaddsub_inf
inline
    
mpd_setspecial will not be inlined into _mpd_qaddsub_inf 
_mpd_qaddsub_inf
inline
    
mpd_setspecial can be inlined into mpd_qadd with cost=120 (threshold=250) 
mpd_qadd
inline
    
mpd_setspecial inlined into mpd_qadd 
mpd_qadd
inline
    
mpd_setspecial can be inlined into mpd_qsub with cost=120 (threshold=250) 
mpd_qsub
inline
    
mpd_setspecial inlined into mpd_qsub 
mpd_qsub
3229
}
3230
3231
/* Add or subtract non-special numbers. */
3232
static void
3233
_mpd_qaddsub(mpd_t *result, const mpd_t *a, const mpd_t *b, uint8_t sign_b,
3234
             const mpd_context_t *ctx, uint32_t *status)
3235
{
3236
    const mpd_t *big, *small;
3237
    MPD_NEW_STATIC(big_aligned,0,0,0,0);
3238
    MPD_NEW_CONST(tiny,0,0,1,1,1,1);
3239
    mpd_uint_t carry;
3240
    mpd_ssize_t newsize, shift;
3241
    mpd_ssize_t exp, i;
3242
    int swap = 0;
3243
3244
3245
    /* compare exponents */
3246
    big = a; small = b;
3247
    if (big->exp != small->exp) {
3248
        if (small->exp > big->exp) {
3249
            _mpd_ptrswap(&big, &small);
inline
            
_mpd_ptrswap can be inlined into _mpd_qaddsub with cost=-40 (threshold=487) 
_mpd_qaddsub
inline
            
_mpd_ptrswap inlined into _mpd_qaddsub 
_mpd_qaddsub
3250
            swap++;
3251
        }
3252
        /* align the coefficients */
3253
        if (!mpd_iszerocoeff(big)) {
inline
             
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_qaddsub
inline
             
mpd_iszerocoeff inlined into _mpd_qaddsub 
_mpd_qaddsub
3254
            exp = big->exp - 1;
3255
            exp += (big->digits > ctx->prec) ? 0 : big->digits-ctx->prec-1;
3256
            if (mpd_adjexp(small) < exp) {
inline
                
mpd_adjexp should always be inlined (cost=always) 
_mpd_qaddsub
inline
                
mpd_adjexp inlined into _mpd_qaddsub 
_mpd_qaddsub
3257
                /*
3258
                 * Avoid huge shifts by substituting a value for small that is
3259
                 * guaranteed to produce the same results.
3260
                 *
3261
                 * adjexp(small) < exp if and only if:
3262
                 *
3263
                 *   bdigits <= prec AND
3264
                 *   bdigits+shift >= prec+2+sdigits AND
3265
                 *   exp = bexp+bdigits-prec-2
3266
                 *
3267
                 *     1234567000000000  ->  bdigits + shift
3268
                 *     ----------XX1234  ->  sdigits
3269
                 *     ----------X1      ->  tiny-digits
3270
                 *     |- prec -|
3271
                 *
3272
                 *      OR
3273
                 *
3274
                 *   bdigits > prec AND
3275
                 *   shift > sdigits AND
3276
                 *   exp = bexp-1
3277
                 *
3278
                 *     1234567892100000  ->  bdigits + shift
3279
                 *     ----------XX1234  ->  sdigits
3280
                 *     ----------X1      ->  tiny-digits
3281
                 *     |- prec -|
3282
                 *
3283
                 * If tiny is zero, adding or subtracting is a no-op.
3284
                 * Otherwise, adding tiny generates a non-zero digit either
3285
                 * below the rounding digit or the least significant digit
3286
                 * of big. When subtracting, tiny is in the same position as
3287
                 * the carry that would be generated by subtracting sdigits.
3288
                 */
3289
                mpd_copy_flags(&tiny, small);
inline
                
mpd_copy_flags should always be inlined (cost=always) 
_mpd_qaddsub
inline
                
mpd_copy_flags inlined into _mpd_qaddsub 
_mpd_qaddsub
3290
                tiny.exp = exp;
3291
                tiny.digits = 1;
3292
                tiny.len = 1;
3293
                tiny.data[0] = mpd_iszerocoeff(small) ? 0 : 1;
inline
                               
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_qaddsub
inline
                               
mpd_iszerocoeff inlined into _mpd_qaddsub 
_mpd_qaddsub
gvn
                     
load of type i64* eliminated in favor of getelementptr 
_mpd_qaddsub
3294
                small = &tiny;
3295
            }
3296
            /* This cannot wrap: the difference is positive and <= maxprec */
3297
            shift = big->exp - small->exp;
gvn
                         
load of type i64 not eliminated in favor of load because it is clobbered by store 
_mpd_qaddsub
gvn
                         
load eliminated by PRE 
_mpd_qaddsub
gvn
                                      
load eliminated by PRE 
_mpd_qaddsub
3298
            if (!mpd_qshiftl(&big_aligned, big, shift, status)) {
inline
                 
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
_mpd_qaddsub
inline
                 
mpd_qshiftl will not be inlined into _mpd_qaddsub 
_mpd_qaddsub
3299
                mpd_seterror(result, MPD_Malloc_error, status);
inline
                
mpd_seterror can be inlined into _mpd_qaddsub with cost=130 (threshold=250) 
_mpd_qaddsub
inline
                
mpd_seterror inlined into _mpd_qaddsub 
_mpd_qaddsub
3300
                goto finish;
3301
            }
3302
            big = &big_aligned;
3303
        }
3304
    }
3305
    result->exp = small->exp;
gvn
                         
load of type i64 not eliminated because it is clobbered by call 
_mpd_qaddsub
3306
3307
3308
    /* compare length of coefficients */
3309
    if (big->len < small->len) {
3310
        _mpd_ptrswap(&big, &small);
inline
        
_mpd_ptrswap can be inlined into _mpd_qaddsub with cost=-40 (threshold=487) 
_mpd_qaddsub
inline
        
_mpd_ptrswap inlined into _mpd_qaddsub 
_mpd_qaddsub
3311
        swap++;
3312
    }
3313
3314
    newsize = big->len;
3315
    if (!mpd_qresize(result, newsize, status)) {
inline
         
mpd_qresize should always be inlined (cost=always) 
_mpd_qaddsub
inline
         
mpd_qresize inlined into _mpd_qaddsub 
_mpd_qaddsub
3316
        goto finish;
3317
    }
3318
3319
    if (mpd_sign(a) == sign_b) {
inline
        
mpd_sign should always be inlined (cost=always) 
_mpd_qaddsub
inline
        
mpd_sign inlined into _mpd_qaddsub 
_mpd_qaddsub
3320
3321
        carry = _mpd_baseadd(result->data, big->data, small->data,
inline
                
_mpd_baseadd will not be inlined into _mpd_qaddsub because its definition is unavailable 
_mpd_qaddsub
gvn
                                     
load of type i64* not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                                                
load of type i64* not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                                                             
load of type i64* not eliminated because it is clobbered by call 
_mpd_qaddsub
3322
                             big->len, small->len);
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                                  
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qaddsub
3323
3324
        if (carry) {
3325
            newsize = big->len + 1;
gvn
                           
load of type i64 not eliminated because it is clobbered by call 
_mpd_qaddsub
3326
            if (!mpd_qresize(result, newsize, status)) {
inline
                 
mpd_qresize should always be inlined (cost=always) 
_mpd_qaddsub
inline
                 
mpd_qresize inlined into _mpd_qaddsub 
_mpd_qaddsub
3327
                goto finish;
3328
            }
3329
            result->data[newsize-1] = carry;
gvn
                    
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_qaddsub
3330
        }
3331
3332
        result->len = newsize;
3333
        mpd_set_flags(result, sign_b);
inline
        
mpd_set_flags should always be inlined (cost=always) 
_mpd_qaddsub
inline
        
mpd_set_flags inlined into _mpd_qaddsub 
_mpd_qaddsub
3334
    }
3335
    else {
3336
        if (big->len == small->len) {
gvn
                 
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qaddsub
3337
            for (i=big->len-1; i >= 0; --i) {
loop-vectorize
            
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_qaddsub
loop-vectorize
            
loop not vectorized 
_mpd_qaddsub
3338
                if (big->data[i] != small->data[i]) {
licm
                         
hosting getelementptr 
_mpd_qaddsub
licm
                         
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qaddsub
licm
                                           
hosting getelementptr 
_mpd_qaddsub
licm
                                           
failed to hoist load with loop-invariant address because load is conditionally executed 
_mpd_qaddsub
gvn
                         
load of type i64* not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                                           
load of type i64* not eliminated because it is clobbered by call 
_mpd_qaddsub
3339
                    if (big->data[i] < small->data[i]) {
3340
                        _mpd_ptrswap(&big, &small);
inline
                        
_mpd_ptrswap can be inlined into _mpd_qaddsub with cost=-40 (threshold=487) 
_mpd_qaddsub
inline
                        
_mpd_ptrswap inlined into _mpd_qaddsub 
_mpd_qaddsub
3341
                        swap++;
3342
                    }
3343
                    break;
3344
                }
3345
            }
3346
        }
3347
3348
        _mpd_basesub(result->data, big->data, small->data,
inline
        
_mpd_basesub will not be inlined into _mpd_qaddsub because its definition is unavailable 
_mpd_qaddsub
gvn
                             
load of type i64* not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                                        
load of type i64* not eliminated because it is clobbered by call 
_mpd_qaddsub
gvn
                                                     
load of type i64* not eliminated because it is clobbered by call 
_mpd_qaddsub
3349
                     big->len, small->len);
gvn
                          
load eliminated by PRE 
_mpd_qaddsub
gvn
                                      
load eliminated by PRE 
_mpd_qaddsub
3350
        newsize = _mpd_real_size(result->data, big->len);
inline
                  
_mpd_real_size can be inlined into _mpd_qaddsub with cost=-5 (threshold=325) 
_mpd_qaddsub
inline
                  
_mpd_real_size inlined into _mpd_qaddsub 
_mpd_qaddsub
gvn
                                         
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_qaddsub
gvn
                                                    
load of type i64 not eliminated because it is clobbered by call 
_mpd_qaddsub
3351
        /* resize to smaller cannot fail */
3352
        (void)mpd_qresize(result, newsize, status);
inline
              
mpd_qresize should always be inlined (cost=always) 
_mpd_qaddsub
inline
              
mpd_qresize inlined into _mpd_qaddsub 
_mpd_qaddsub
3353
3354
        result->len = newsize;
3355
        sign_b = (swap & 1) ? sign_b : mpd_sign(a);
inline
                                       
mpd_sign should always be inlined (cost=always) 
_mpd_qaddsub
inline
                                       
mpd_sign inlined into _mpd_qaddsub 
_mpd_qaddsub
3356
        mpd_set_flags(result, sign_b);
inline
        
mpd_set_flags should always be inlined (cost=always) 
_mpd_qaddsub
inline
        
mpd_set_flags inlined into _mpd_qaddsub 
_mpd_qaddsub
3357
3358
        if (mpd_iszerocoeff(result)) {
inline
            
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_qaddsub
inline
            
mpd_iszerocoeff inlined into _mpd_qaddsub 
_mpd_qaddsub
3359
            mpd_set_positive(result);
inline
            
mpd_set_positive should always be inlined (cost=always) 
_mpd_qaddsub
inline
            
mpd_set_positive inlined into _mpd_qaddsub 
_mpd_qaddsub
3360
            if (ctx->round == MPD_ROUND_FLOOR) {
gvn
                     
load of type i32 not eliminated because it is clobbered by call 
_mpd_qaddsub
3361
                mpd_set_negative(result);
inline
                
mpd_set_negative should always be inlined (cost=always) 
_mpd_qaddsub
inline
                
mpd_set_negative inlined into _mpd_qaddsub 
_mpd_qaddsub
3362
            }
3363
        }
3364
    }
3365
3366
    mpd_setdigits(result);
inline
    
mpd_setdigits can be inlined into _mpd_qaddsub with cost=295 (threshold=325) 
_mpd_qaddsub
inline
    
mpd_setdigits inlined into _mpd_qaddsub 
_mpd_qaddsub
3367
3368
finish:
3369
    mpd_del(&big_aligned);
inline
    
mpd_del should always be inlined (cost=always) 
_mpd_qaddsub
inline
    
mpd_del inlined into _mpd_qaddsub 
_mpd_qaddsub
3370
}
3371
3372
/* Add a and b. No specials, no finalizing. */
3373
static void
3374
_mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
3375
          const mpd_context_t *ctx, uint32_t *status)
3376
{
3377
    _mpd_qaddsub(result, a, b, mpd_sign(b), ctx, status);
inline
                               
mpd_sign should always be inlined (cost=always) 
_mpd_qadd
inline
                               
mpd_sign inlined into _mpd_qadd 
_mpd_qadd
inline
    
_mpd_qaddsub too costly to inline (cost=630, threshold=625) 
_mpd_qadd
inline
    
_mpd_qaddsub will not be inlined into _mpd_qadd 
_mpd_qadd
inline
    
_mpd_qaddsub too costly to inline (cost=630, threshold=625) 
mpd_qrotate
inline
    
_mpd_qaddsub will not be inlined into mpd_qrotate 
mpd_qrotate
inline
    
_mpd_qaddsub too costly to inline (cost=630, threshold=625) 
mpd_qrem_near
inline
    
_mpd_qaddsub will not be inlined into mpd_qrem_near 
mpd_qrem_near
3378
}
3379
3380
/* Subtract b from a. No specials, no finalizing. */
3381
static void
3382
_mpd_qsub(mpd_t *result, const mpd_t *a, const mpd_t *b,
3383
          const mpd_context_t *ctx, uint32_t *status)
3384
{
3385
     _mpd_qaddsub(result, a, b, !mpd_sign(b), ctx, status);
inline
                                 
mpd_sign should always be inlined (cost=always) 
_mpd_qsub
inline
                                 
mpd_sign inlined into _mpd_qsub 
_mpd_qsub
inline
     
_mpd_qaddsub too costly to inline (cost=630, threshold=625) 
_mpd_qsub
inline
     
_mpd_qaddsub will not be inlined into _mpd_qsub 
_mpd_qsub
inline
     
_mpd_qaddsub too costly to inline (cost=630, threshold=625) 
_mpd_qln
inline
     
_mpd_qaddsub will not be inlined into _mpd_qln 
_mpd_qln
inline
     
_mpd_qaddsub too costly to inline (cost=630, threshold=625) 
mpd_qrem_near
inline
     
_mpd_qaddsub will not be inlined into mpd_qrem_near 
mpd_qrem_near
3386
}
3387
3388
/* Add a and b. */
3389
void
3390
mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
3391
         const mpd_context_t *ctx, uint32_t *status)
3392
{
3393
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qadd
inline
        
mpd_isspecial inlined into mpd_qadd 
mpd_qadd
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qadd
inline
                            
mpd_isspecial inlined into mpd_qadd 
mpd_qadd
3394
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qadd
inline
            
mpd_qcheck_nans will not be inlined into mpd_qadd 
mpd_qadd
3395
            return;
3396
        }
3397
        _mpd_qaddsub_inf(result, a, b, mpd_sign(b), status);
inline
                                       
mpd_sign should always be inlined (cost=always) 
mpd_qadd
inline
                                       
mpd_sign inlined into mpd_qadd 
mpd_qadd
inline
        
_mpd_qaddsub_inf can be inlined into mpd_qadd with cost=130 (threshold=250) 
mpd_qadd
inline
        
_mpd_qaddsub_inf inlined into mpd_qadd 
mpd_qadd
3398
        return;
3399
    }
3400
3401
    _mpd_qaddsub(result, a, b, mpd_sign(b), ctx, status);
inline
    
_mpd_qaddsub too costly to inline (cost=630, threshold=625) 
mpd_qadd
inline
    
_mpd_qaddsub will not be inlined into mpd_qadd 
mpd_qadd
inline
                               
mpd_sign should always be inlined (cost=always) 
mpd_qadd
inline
                               
mpd_sign inlined into mpd_qadd 
mpd_qadd
3402
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qadd
inline
    
mpd_qfinalize will not be inlined into mpd_qadd 
mpd_qadd
3403
}
3404
3405
/* Add a and b. Set NaN/Invalid_operation if the result is inexact. */
3406
static void
3407
_mpd_qadd_exact(mpd_t *result, const mpd_t *a, const mpd_t *b,
3408
                const mpd_context_t *ctx, uint32_t *status)
3409
{
3410
    uint32_t workstatus = 0;
licm
    
hosting bitcast 
_mpd_base_ndivmod
licm
    
hosting bitcast 
mpd_qsqrt
3411
3412
    mpd_qadd(result, a, b, ctx, &workstatus);
inline
    
mpd_qadd too costly to inline (cost=660, threshold=625) 
_mpd_qadd_exact
inline
    
mpd_qadd will not be inlined into _mpd_qadd_exact 
_mpd_qadd_exact
inline
    
mpd_qadd too costly to inline (cost=660, threshold=625) 
_mpd_base_ndivmod
inline
    
mpd_qadd will not be inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
inline
    
mpd_qadd too costly to inline (cost=660, threshold=625) 
mpd_qsqrt
inline
    
mpd_qadd will not be inlined into mpd_qsqrt 
mpd_qsqrt
3413
    *status |= workstatus;
gvn
               
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qadd_exact
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qadd_exact
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
               
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_base_ndivmod
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qsqrt
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qsqrt
gvn
               
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qsqrt
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qsqrt
3414
    if (workstatus & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
3415
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into _mpd_qadd_exact with cost=130 (threshold=250) 
_mpd_qadd_exact
inline
        
mpd_seterror inlined into _mpd_qadd_exact 
_mpd_qadd_exact
3416
    }
3417
}
3418
3419
/* Subtract b from a. */
3420
void
3421
mpd_qsub(mpd_t *result, const mpd_t *a, const mpd_t *b,
3422
         const mpd_context_t *ctx, uint32_t *status)
3423
{
3424
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qsub
inline
        
mpd_isspecial inlined into mpd_qsub 
mpd_qsub
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qsub
inline
                            
mpd_isspecial inlined into mpd_qsub 
mpd_qsub
3425
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qsub
inline
            
mpd_qcheck_nans will not be inlined into mpd_qsub 
mpd_qsub
3426
            return;
3427
        }
3428
        _mpd_qaddsub_inf(result, a, b, !mpd_sign(b), status);
inline
                                        
mpd_sign should always be inlined (cost=always) 
mpd_qsub
inline
                                        
mpd_sign inlined into mpd_qsub 
mpd_qsub
inline
        
_mpd_qaddsub_inf can be inlined into mpd_qsub with cost=-14870 (threshold=250) 
mpd_qsub
inline
        
_mpd_qaddsub_inf inlined into mpd_qsub 
mpd_qsub
3429
        return;
3430
    }
3431
3432
    _mpd_qaddsub(result, a, b, !mpd_sign(b), ctx, status);
inline
    
_mpd_qaddsub too costly to inline (cost=630, threshold=625) 
mpd_qsub
inline
    
_mpd_qaddsub will not be inlined into mpd_qsub 
mpd_qsub
inline
                                
mpd_sign should always be inlined (cost=always) 
mpd_qsub
inline
                                
mpd_sign inlined into mpd_qsub 
mpd_qsub
3433
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qsub
inline
    
mpd_qfinalize will not be inlined into mpd_qsub 
mpd_qsub
3434
}
3435
3436
/* Subtract b from a. Set NaN/Invalid_operation if the result is inexact. */
3437
static void
3438
_mpd_qsub_exact(mpd_t *result, const mpd_t *a, const mpd_t *b,
3439
                const mpd_context_t *ctx, uint32_t *status)
3440
{
3441
    uint32_t workstatus = 0;
licm
    
hosting bitcast 
_mpd_qreciprocal
licm
    
hosting bitcast 
_mpd_base_ndivmod
3442
3443
    mpd_qsub(result, a, b, ctx, &workstatus);
inline
    
mpd_qsub too costly to inline (cost=670, threshold=625) 
_mpd_qsub_exact
inline
    
mpd_qsub will not be inlined into _mpd_qsub_exact 
_mpd_qsub_exact
inline
    
mpd_qsub too costly to inline (cost=670, threshold=625) 
_mpd_qreciprocal
inline
    
mpd_qsub will not be inlined into _mpd_qreciprocal 
_mpd_qreciprocal
inline
    
mpd_qsub too costly to inline (cost=670, threshold=625) 
_mpd_base_ndivmod
inline
    
mpd_qsub will not be inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
3444
    *status |= workstatus;
gvn
               
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qsub_exact
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qsub_exact
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qreciprocal
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qreciprocal
gvn
               
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qreciprocal
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qreciprocal
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
               
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_base_ndivmod
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
3445
    if (workstatus & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
3446
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into _mpd_qsub_exact with cost=130 (threshold=250) 
_mpd_qsub_exact
inline
        
mpd_seterror inlined into _mpd_qsub_exact 
_mpd_qsub_exact
3447
    }
3448
}
3449
3450
/* Add decimal and mpd_ssize_t. */
3451
void
3452
mpd_qadd_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
3453
               const mpd_context_t *ctx, uint32_t *status)
3454
{
3455
    mpd_context_t maxcontext;
3456
    MPD_NEW_STATIC(bb,0,0,0,0);
3457
3458
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qadd_ssize because its definition is unavailable 
mpd_qadd_ssize
3459
    mpd_qsset_ssize(&bb, b, &maxcontext, status);
inline
    
mpd_qsset_ssize can be inlined into mpd_qadd_ssize with cost=65 (threshold=375) 
mpd_qadd_ssize
inline
    
mpd_qsset_ssize inlined into mpd_qadd_ssize 
mpd_qadd_ssize
3460
    mpd_qadd(result, a, &bb, ctx, status);
inline
    
mpd_qadd too costly to inline (cost=660, threshold=625) 
mpd_qadd_ssize
inline
    
mpd_qadd will not be inlined into mpd_qadd_ssize 
mpd_qadd_ssize
3461
    mpd_del(&bb);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qadd_ssize
inline
    
mpd_del inlined into mpd_qadd_ssize 
mpd_qadd_ssize
3462
}
3463
3464
/* Add decimal and mpd_uint_t. */
3465
void
3466
mpd_qadd_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
3467
              const mpd_context_t *ctx, uint32_t *status)
3468
{
3469
    mpd_context_t maxcontext;
3470
    MPD_NEW_STATIC(bb,0,0,0,0);
3471
3472
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qadd_uint because its definition is unavailable 
mpd_qadd_uint
3473
    mpd_qsset_uint(&bb, b, &maxcontext, status);
inline
    
mpd_qsset_uint can be inlined into mpd_qadd_uint with cost=45 (threshold=375) 
mpd_qadd_uint
inline
    
mpd_qsset_uint inlined into mpd_qadd_uint 
mpd_qadd_uint
3474
    mpd_qadd(result, a, &bb, ctx, status);
inline
    
mpd_qadd too costly to inline (cost=660, threshold=625) 
mpd_qadd_uint
inline
    
mpd_qadd will not be inlined into mpd_qadd_uint 
mpd_qadd_uint
inline
    
mpd_qadd too costly to inline (cost=660, threshold=625) 
mpd_qadd_u32
inline
    
mpd_qadd will not be inlined into mpd_qadd_u32 
mpd_qadd_u32
inline
    
mpd_qadd too costly to inline (cost=660, threshold=625) 
mpd_qadd_u64
inline
    
mpd_qadd will not be inlined into mpd_qadd_u64 
mpd_qadd_u64
3475
    mpd_del(&bb);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qadd_uint
inline
    
mpd_del inlined into mpd_qadd_uint 
mpd_qadd_uint
3476
}
3477
3478
/* Subtract mpd_ssize_t from decimal. */
3479
void
3480
mpd_qsub_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
3481
               const mpd_context_t *ctx, uint32_t *status)
3482
{
3483
    mpd_context_t maxcontext;
3484
    MPD_NEW_STATIC(bb,0,0,0,0);
3485
3486
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qsub_ssize because its definition is unavailable 
mpd_qsub_ssize
3487
    mpd_qsset_ssize(&bb, b, &maxcontext, status);
inline
    
mpd_qsset_ssize can be inlined into mpd_qsub_ssize with cost=65 (threshold=375) 
mpd_qsub_ssize
inline
    
mpd_qsset_ssize inlined into mpd_qsub_ssize 
mpd_qsub_ssize
3488
    mpd_qsub(result, a, &bb, ctx, status);
inline
    
mpd_qsub too costly to inline (cost=670, threshold=625) 
mpd_qsub_ssize
inline
    
mpd_qsub will not be inlined into mpd_qsub_ssize 
mpd_qsub_ssize
3489
    mpd_del(&bb);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qsub_ssize
inline
    
mpd_del inlined into mpd_qsub_ssize 
mpd_qsub_ssize
3490
}
3491
3492
/* Subtract mpd_uint_t from decimal. */
3493
void
3494
mpd_qsub_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
3495
              const mpd_context_t *ctx, uint32_t *status)
3496
{
3497
    mpd_context_t maxcontext;
3498
    MPD_NEW_STATIC(bb,0,0,0,0);
3499
3500
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qsub_uint because its definition is unavailable 
mpd_qsub_uint
3501
    mpd_qsset_uint(&bb, b, &maxcontext, status);
inline
    
mpd_qsset_uint can be inlined into mpd_qsub_uint with cost=45 (threshold=375) 
mpd_qsub_uint
inline
    
mpd_qsset_uint inlined into mpd_qsub_uint 
mpd_qsub_uint
3502
    mpd_qsub(result, a, &bb, ctx, status);
inline
    
mpd_qsub too costly to inline (cost=670, threshold=625) 
mpd_qsub_uint
inline
    
mpd_qsub will not be inlined into mpd_qsub_uint 
mpd_qsub_uint
inline
    
mpd_qsub too costly to inline (cost=670, threshold=625) 
mpd_qsub_u32
inline
    
mpd_qsub will not be inlined into mpd_qsub_u32 
mpd_qsub_u32
inline
    
mpd_qsub too costly to inline (cost=670, threshold=625) 
mpd_qsub_u64
inline
    
mpd_qsub will not be inlined into mpd_qsub_u64 
mpd_qsub_u64
3503
    mpd_del(&bb);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qsub_uint
inline
    
mpd_del inlined into mpd_qsub_uint 
mpd_qsub_uint
3504
}
3505
3506
/* Add decimal and int32_t. */
3507
void
3508
mpd_qadd_i32(mpd_t *result, const mpd_t *a, int32_t b,
3509
             const mpd_context_t *ctx, uint32_t *status)
3510
{
3511
    mpd_qadd_ssize(result, a, b, ctx, status);
inline
    
mpd_qadd_ssize too costly to inline (cost=255, threshold=250) 
mpd_qadd_i32
inline
    
mpd_qadd_ssize will not be inlined into mpd_qadd_i32 
mpd_qadd_i32
3512
}
3513
3514
/* Add decimal and uint32_t. */
3515
void
3516
mpd_qadd_u32(mpd_t *result, const mpd_t *a, uint32_t b,
3517
             const mpd_context_t *ctx, uint32_t *status)
3518
{
3519
    mpd_qadd_uint(result, a, b, ctx, status);
inline
    
mpd_qadd_uint can be inlined into mpd_qadd_u32 with cost=235 (threshold=250) 
mpd_qadd_u32
inline
    
mpd_qadd_uint inlined into mpd_qadd_u32 
mpd_qadd_u32
3520
}
3521
3522
#ifdef CONFIG_64
3523
/* Add decimal and int64_t. */
3524
void
3525
mpd_qadd_i64(mpd_t *result, const mpd_t *a, int64_t b,
3526
             const mpd_context_t *ctx, uint32_t *status)
3527
{
3528
    mpd_qadd_ssize(result, a, b, ctx, status);
inline
    
mpd_qadd_ssize too costly to inline (cost=255, threshold=250) 
mpd_qadd_i64
inline
    
mpd_qadd_ssize will not be inlined into mpd_qadd_i64 
mpd_qadd_i64
3529
}
3530
3531
/* Add decimal and uint64_t. */
3532
void
3533
mpd_qadd_u64(mpd_t *result, const mpd_t *a, uint64_t b,
3534
             const mpd_context_t *ctx, uint32_t *status)
3535
{
3536
    mpd_qadd_uint(result, a, b, ctx, status);
inline
    
mpd_qadd_uint can be inlined into mpd_qadd_u64 with cost=235 (threshold=250) 
mpd_qadd_u64
inline
    
mpd_qadd_uint inlined into mpd_qadd_u64 
mpd_qadd_u64
3537
}
3538
#elif !defined(LEGACY_COMPILER)
3539
/* Add decimal and int64_t. */
3540
void
3541
mpd_qadd_i64(mpd_t *result, const mpd_t *a, int64_t b,
3542
             const mpd_context_t *ctx, uint32_t *status)
3543
{
3544
    mpd_context_t maxcontext;
3545
    MPD_NEW_STATIC(bb,0,0,0,0);
3546
3547
    mpd_maxcontext(&maxcontext);
3548
    mpd_qset_i64(&bb, b, &maxcontext, status);
3549
    mpd_qadd(result, a, &bb, ctx, status);
3550
    mpd_del(&bb);
3551
}
3552
3553
/* Add decimal and uint64_t. */
3554
void
3555
mpd_qadd_u64(mpd_t *result, const mpd_t *a, uint64_t b,
3556
             const mpd_context_t *ctx, uint32_t *status)
3557
{
3558
    mpd_context_t maxcontext;
3559
    MPD_NEW_STATIC(bb,0,0,0,0);
3560
3561
    mpd_maxcontext(&maxcontext);
3562
    mpd_qset_u64(&bb, b, &maxcontext, status);
3563
    mpd_qadd(result, a, &bb, ctx, status);
3564
    mpd_del(&bb);
3565
}
3566
#endif
3567
3568
/* Subtract int32_t from decimal. */
3569
void
3570
mpd_qsub_i32(mpd_t *result, const mpd_t *a, int32_t b,
3571
             const mpd_context_t *ctx, uint32_t *status)
3572
{
3573
    mpd_qsub_ssize(result, a, b, ctx, status);
inline
    
mpd_qsub_ssize too costly to inline (cost=255, threshold=250) 
mpd_qsub_i32
inline
    
mpd_qsub_ssize will not be inlined into mpd_qsub_i32 
mpd_qsub_i32
3574
}
3575
3576
/* Subtract uint32_t from decimal. */
3577
void
3578
mpd_qsub_u32(mpd_t *result, const mpd_t *a, uint32_t b,
3579
             const mpd_context_t *ctx, uint32_t *status)
3580
{
3581
    mpd_qsub_uint(result, a, b, ctx, status);
inline
    
mpd_qsub_uint can be inlined into mpd_qsub_u32 with cost=235 (threshold=250) 
mpd_qsub_u32
inline
    
mpd_qsub_uint inlined into mpd_qsub_u32 
mpd_qsub_u32
3582
}
3583
3584
#ifdef CONFIG_64
3585
/* Subtract int64_t from decimal. */
3586
void
3587
mpd_qsub_i64(mpd_t *result, const mpd_t *a, int64_t b,
3588
             const mpd_context_t *ctx, uint32_t *status)
3589
{
3590
    mpd_qsub_ssize(result, a, b, ctx, status);
inline
    
mpd_qsub_ssize too costly to inline (cost=255, threshold=250) 
mpd_qsub_i64
inline
    
mpd_qsub_ssize will not be inlined into mpd_qsub_i64 
mpd_qsub_i64
3591
}
3592
3593
/* Subtract uint64_t from decimal. */
3594
void
3595
mpd_qsub_u64(mpd_t *result, const mpd_t *a, uint64_t b,
3596
             const mpd_context_t *ctx, uint32_t *status)
3597
{
3598
    mpd_qsub_uint(result, a, b, ctx, status);
inline
    
mpd_qsub_uint can be inlined into mpd_qsub_u64 with cost=235 (threshold=250) 
mpd_qsub_u64
inline
    
mpd_qsub_uint inlined into mpd_qsub_u64 
mpd_qsub_u64
3599
}
3600
#elif !defined(LEGACY_COMPILER)
3601
/* Subtract int64_t from decimal. */
3602
void
3603
mpd_qsub_i64(mpd_t *result, const mpd_t *a, int64_t b,
3604
             const mpd_context_t *ctx, uint32_t *status)
3605
{
3606
    mpd_context_t maxcontext;
3607
    MPD_NEW_STATIC(bb,0,0,0,0);
3608
3609
    mpd_maxcontext(&maxcontext);
3610
    mpd_qset_i64(&bb, b, &maxcontext, status);
3611
    mpd_qsub(result, a, &bb, ctx, status);
3612
    mpd_del(&bb);
3613
}
3614
3615
/* Subtract uint64_t from decimal. */
3616
void
3617
mpd_qsub_u64(mpd_t *result, const mpd_t *a, uint64_t b,
3618
             const mpd_context_t *ctx, uint32_t *status)
3619
{
3620
    mpd_context_t maxcontext;
3621
    MPD_NEW_STATIC(bb,0,0,0,0);
3622
3623
    mpd_maxcontext(&maxcontext);
3624
    mpd_qset_u64(&bb, b, &maxcontext, status);
3625
    mpd_qsub(result, a, &bb, ctx, status);
3626
    mpd_del(&bb);
3627
}
3628
#endif
3629
3630
3631
/* Divide infinities. */
3632
static void
3633
_mpd_qdiv_inf(mpd_t *result, const mpd_t *a, const mpd_t *b,
3634
              const mpd_context_t *ctx, uint32_t *status)
3635
{
3636
    if (mpd_isinfinite(a)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
_mpd_qdiv_inf
inline
        
mpd_isinfinite inlined into _mpd_qdiv_inf 
_mpd_qdiv_inf
3637
        if (mpd_isinfinite(b)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
_mpd_qdiv_inf
inline
            
mpd_isinfinite inlined into _mpd_qdiv_inf 
_mpd_qdiv_inf
3638
            mpd_seterror(result, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into _mpd_qdiv_inf with cost=130 (threshold=250) 
_mpd_qdiv_inf
inline
            
mpd_seterror inlined into _mpd_qdiv_inf 
_mpd_qdiv_inf
3639
            return;
3640
        }
3641
        mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
inline
        
mpd_setspecial can be inlined into _mpd_qdiv_inf with cost=120 (threshold=250) 
_mpd_qdiv_inf
inline
        
mpd_setspecial inlined into _mpd_qdiv_inf 
_mpd_qdiv_inf
inline
                               
mpd_sign should always be inlined (cost=always) 
_mpd_qdiv_inf
inline
                               
mpd_sign inlined into _mpd_qdiv_inf 
_mpd_qdiv_inf
inline
                                           
mpd_sign should always be inlined (cost=always) 
_mpd_qdiv_inf
inline
                                           
mpd_sign inlined into _mpd_qdiv_inf 
_mpd_qdiv_inf
3642
        return;
3643
    }
3644
    assert(mpd_isinfinite(b));
3645
    _settriple(result, mpd_sign(a)^mpd_sign(b), 0, mpd_etiny(ctx));
inline
    
_settriple can be inlined into _mpd_qdiv_inf with cost=180 (threshold=250) 
_mpd_qdiv_inf
inline
    
_settriple inlined into _mpd_qdiv_inf 
_mpd_qdiv_inf
inline
                                                   
mpd_etiny should always be inlined (cost=always) 
_mpd_qdiv_inf
inline
                                                   
mpd_etiny inlined into _mpd_qdiv_inf 
_mpd_qdiv_inf
inline
                                   
mpd_sign should always be inlined (cost=always) 
_mpd_qdiv_inf
inline
                                   
mpd_sign inlined into _mpd_qdiv_inf 
_mpd_qdiv_inf
inline
                       
mpd_sign should always be inlined (cost=always) 
_mpd_qdiv_inf
inline
                       
mpd_sign inlined into _mpd_qdiv_inf 
_mpd_qdiv_inf
3646
    *status |= MPD_Clamped;
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qdiv_inf
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qdiv
3647
}
3648
3649
/* Whether the quotient should be shifted towards the ideal exponent
 * (ideal_exp = a->exp - b->exp) when the division is exact. */
enum {NO_IDEAL_EXP, SET_IDEAL_EXP};
/* Divide a by b. */
static void
_mpd_qdiv(int action, mpd_t *q, const mpd_t *a, const mpd_t *b,
          const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_STATIC(aligned,0,0,0,0);  /* shifted copy of a or b */
    mpd_uint_t ld;                    /* least significant digit of q */
    mpd_ssize_t shift, exp, tz;
    mpd_ssize_t newsize;
    mpd_ssize_t ideal_exp;
    mpd_uint_t rem;                   /* nonzero iff the division is inexact */
    uint8_t sign_a = mpd_sign(a);
    uint8_t sign_b = mpd_sign(b);

    /* Special values: NaNs propagate, infinities are handled separately. */
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(q, a, b, ctx, status)) {
            return;
        }
        _mpd_qdiv_inf(q, a, b, ctx, status);
        return;
    }
    /* Division by zero: 0/0 is undefined, x/0 is a signed infinity. */
    if (mpd_iszerocoeff(b)) {
        if (mpd_iszerocoeff(a)) {
            mpd_seterror(q, MPD_Division_undefined, status);
        }
        else {
            mpd_setspecial(q, sign_a^sign_b, MPD_INF);
            *status |= MPD_Division_by_zero;
        }
        return;
    }
    /* 0/x: a signed zero with the exponent difference. */
    if (mpd_iszerocoeff(a)) {
        exp = a->exp - b->exp;
        _settriple(q, sign_a^sign_b, 0, exp);
        mpd_qfinalize(q, ctx, status);
        return;
    }

    /* Align the operands so that the integer quotient has at least
     * prec+1 digits; exp is the tentative exponent of that quotient. */
    shift = (b->digits - a->digits) + ctx->prec + 1;
    ideal_exp = a->exp - b->exp;
    exp = ideal_exp - shift;
    if (shift > 0) {
        if (!mpd_qshiftl(&aligned, a, shift, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
        a = &aligned;
    }
    else if (shift < 0) {
        shift = -shift;
        if (!mpd_qshiftl(&aligned, b, shift, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
        b = &aligned;
    }

    /* Resize q, but never shrink an operand that q aliases before the
     * division has read it. */
    newsize = a->len - b->len + 1;
    if ((q != b && q != a) || (q == b && newsize > b->len)) {
        if (!mpd_qresize(q, newsize, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
    }

    /* Choose the division algorithm by the size of the divisor. */
    if (b->len == 1) {
        rem = _mpd_shortdiv(q->data, a->data, a->len, b->data[0]);
    }
    else if (b->len <= MPD_NEWTONDIV_CUTOFF) {
        int ret = _mpd_basedivmod(q->data, NULL, a->data, b->data,
                                  a->len, b->len);
        if (ret < 0) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
        rem = ret;
    }
    else {
        /* Large divisor: Newton division, which also yields a remainder. */
        MPD_NEW_STATIC(r,0,0,0,0);
        _mpd_base_ndivmod(q, &r, a, b, status);
        if (mpd_isspecial(q) || mpd_isspecial(&r)) {
            mpd_setspecial(q, MPD_POS, MPD_NAN);
            mpd_del(&r);
            goto finish;
        }
        rem = !mpd_iszerocoeff(&r);
        mpd_del(&r);
        newsize = q->len;
    }

    newsize = _mpd_real_size(q->data, newsize);
    /* resize to smaller cannot fail */
    mpd_qresize(q, newsize, status);
    mpd_set_flags(q, sign_a^sign_b);
    q->len = newsize;
    mpd_setdigits(q);

    shift = ideal_exp - exp;
    if (rem) {
        /* Inexact result: if the last digit is 0 or 5, bump it to 1 or 6
         * so that the subsequent rounding in mpd_qfinalize cannot treat
         * the truncated quotient as an exact tie or exact value. */
        ld = mpd_lsd(q->data[0]);
        if (ld == 0 || ld == 5) {
            q->data[0] += 1;
        }
    }
    else if (action == SET_IDEAL_EXP && shift > 0) {
        /* Exact result: strip trailing zeros, moving the exponent
         * towards ideal_exp (but never past it). */
        tz = mpd_trail_zeros(q);
        shift = (tz > shift) ? shift : tz;
        mpd_qshiftr_inplace(q, shift);
        exp += shift;
    }

    q->exp = exp;

finish:
    mpd_del(&aligned);
    mpd_qfinalize(q, ctx, status);
}
3771
3772
/* Divide a by b. */
void
mpd_qdiv(mpd_t *q, const mpd_t *a, const mpd_t *b,
         const mpd_context_t *ctx, uint32_t *status)
{
    /* Public entry point: exact results are shifted towards the
     * ideal exponent (SET_IDEAL_EXP). */
    _mpd_qdiv(SET_IDEAL_EXP, q, a, b, ctx, status);
}
3779
3780
/* Internal function: integer quotient q and remainder r of a/b.
 * Caller guarantees that a and b are finite (not special) and that
 * b has a nonzero coefficient. On failure both q and r are set to NaN. */
static void
_mpd_qdivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
             const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_STATIC(aligned,0,0,0,0);  /* shifted copy of a or b */
    mpd_ssize_t qsize, rsize;
    mpd_ssize_t ideal_exp, expdiff, shift;
    uint8_t sign_a = mpd_sign(a);
    uint8_t sign_ab = mpd_sign(a)^mpd_sign(b);

    /* The remainder carries the smaller of the two exponents. */
    ideal_exp = (a->exp > b->exp) ?  b->exp : a->exp;
    /* 0 divmod x: quotient is a signed zero, remainder is a (rescaled). */
    if (mpd_iszerocoeff(a)) {
        if (!mpd_qcopy(r, a, status)) {
            goto nanresult; /* GCOV_NOT_REACHED */
        }
        r->exp = ideal_exp;
        _settriple(q, sign_ab, 0, 0);
        return;
    }

    /* Compare adjusted exponents: if |a| < |b| the integer quotient
     * is zero and the remainder is a itself. */
    expdiff = mpd_adjexp(a) - mpd_adjexp(b);
    if (expdiff < 0) {
        if (a->exp > b->exp) {
            /* positive and less than b->digits - a->digits */
            shift = a->exp - b->exp;
            if (!mpd_qshiftl(r, a, shift, status)) {
                goto nanresult;
            }
            r->exp = ideal_exp;
        }
        else {
            if (!mpd_qcopy(r, a, status)) {
                goto nanresult;
            }
        }
        _settriple(q, sign_ab, 0, 0);
        return;
    }
    /* The integer quotient would need more than prec digits. */
    if (expdiff > ctx->prec) {
        *status |= MPD_Division_impossible;
        goto nanresult;
    }

    /*
     * At this point we have:
     *   (1) 0 <= a->exp + a->digits - b->exp - b->digits <= prec
     *   (2) a->exp - b->exp >= b->digits - a->digits
     *   (3) a->exp - b->exp <= prec + b->digits - a->digits
     */
    /* Shift the operand with the larger exponent so that both share
     * a common exponent before the coefficient division. */
    if (a->exp != b->exp) {
        shift = a->exp - b->exp;
        if (shift > 0) {
            /* by (3), after the shift a->digits <= prec + b->digits */
            if (!mpd_qshiftl(&aligned, a, shift, status)) {
                goto nanresult;
            }
            a = &aligned;
        }
        else  {
            shift = -shift;
            /* by (2), after the shift b->digits <= a->digits */
            if (!mpd_qshiftl(&aligned, b, shift, status)) {
                goto nanresult;
            }
            b = &aligned;
        }
    }

    /* Resize q and r, but never shrink an operand they alias before
     * the division has read it. */
    qsize = a->len - b->len + 1;
    if (!(q == a && qsize < a->len) && !(q == b && qsize < b->len)) {
        if (!mpd_qresize(q, qsize, status)) {
            goto nanresult;
        }
    }

    rsize = b->len;
    if (!(r == a && rsize < a->len)) {
        if (!mpd_qresize(r, rsize, status)) {
            goto nanresult;
        }
    }

    /* Choose the division algorithm by the size of the operands. */
    if (b->len == 1) {
        if (a->len == 1) {
            _mpd_div_word(&q->data[0], &r->data[0], a->data[0], b->data[0]);
        }
        else {
            r->data[0] = _mpd_shortdiv(q->data, a->data, a->len, b->data[0]);
        }
    }
    else if (b->len <= MPD_NEWTONDIV_CUTOFF) {
        int ret;
        ret = _mpd_basedivmod(q->data, r->data, a->data, b->data,
                              a->len, b->len);
        if (ret == -1) {
            *status |= MPD_Malloc_error;
            goto nanresult;
        }
    }
    else {
        /* Large divisor: Newton division. */
        _mpd_base_ndivmod(q, r, a, b, status);
        if (mpd_isspecial(q) || mpd_isspecial(r)) {
            goto nanresult;
        }
        qsize = q->len;
        rsize = r->len;
    }

    qsize = _mpd_real_size(q->data, qsize);
    /* resize to smaller cannot fail */
    mpd_qresize(q, qsize, status);
    q->len = qsize;
    mpd_setdigits(q);
    mpd_set_flags(q, sign_ab);
    q->exp = 0;
    /* The quotient of divide-integer must fit into prec digits. */
    if (q->digits > ctx->prec) {
        *status |= MPD_Division_impossible;
        goto nanresult;
    }

    rsize = _mpd_real_size(r->data, rsize);
    /* resize to smaller cannot fail */
    mpd_qresize(r, rsize, status);
    r->len = rsize;
    mpd_setdigits(r);
    /* The remainder takes the sign of a and the ideal exponent. */
    mpd_set_flags(r, sign_a);
    r->exp = ideal_exp;

out:
    mpd_del(&aligned);
    return;

nanresult:
    mpd_setspecial(q, MPD_POS, MPD_NAN);
    mpd_setspecial(r, MPD_POS, MPD_NAN);
    goto out;
}
3921
3922
/* Integer division with remainder. */
3923
void
3924
mpd_qdivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
3925
            const mpd_context_t *ctx, uint32_t *status)
3926
{
3927
    uint8_t sign = mpd_sign(a)^mpd_sign(b);
inline
                   
mpd_sign should always be inlined (cost=always) 
mpd_qdivmod
inline
                   
mpd_sign inlined into mpd_qdivmod 
mpd_qdivmod
inline
                               
mpd_sign should always be inlined (cost=always) 
mpd_qdivmod
inline
                               
mpd_sign inlined into mpd_qdivmod 
mpd_qdivmod
3928
3929
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qdivmod
inline
        
mpd_isspecial inlined into mpd_qdivmod 
mpd_qdivmod
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qdivmod
inline
                            
mpd_isspecial inlined into mpd_qdivmod 
mpd_qdivmod
3930
        if (mpd_qcheck_nans(q, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qdivmod
inline
            
mpd_qcheck_nans will not be inlined into mpd_qdivmod 
mpd_qdivmod
3931
            mpd_qcopy(r, q, status);
inline
            
mpd_qcopy can be inlined into mpd_qdivmod with cost=215 (threshold=250) 
mpd_qdivmod
inline
            
mpd_qcopy inlined into mpd_qdivmod 
mpd_qdivmod
3932
            return;
3933
        }
3934
        if (mpd_isinfinite(a)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
mpd_qdivmod
inline
            
mpd_isinfinite inlined into mpd_qdivmod 
mpd_qdivmod
3935
            if (mpd_isinfinite(b)) {
inline
                
mpd_isinfinite should always be inlined (cost=always) 
mpd_qdivmod
inline
                
mpd_isinfinite inlined into mpd_qdivmod 
mpd_qdivmod
3936
                mpd_setspecial(q, MPD_POS, MPD_NAN);
inline
                
mpd_setspecial can be inlined into mpd_qdivmod with cost=115 (threshold=250) 
mpd_qdivmod
inline
                
mpd_setspecial inlined into mpd_qdivmod 
mpd_qdivmod
3937
            }
3938
            else {
3939
                mpd_setspecial(q, sign, MPD_INF);
inline
                
mpd_setspecial can be inlined into mpd_qdivmod with cost=120 (threshold=250) 
mpd_qdivmod
inline
                
mpd_setspecial inlined into mpd_qdivmod 
mpd_qdivmod
3940
            }
3941
            mpd_setspecial(r, MPD_POS, MPD_NAN);
inline
            
mpd_setspecial can be inlined into mpd_qdivmod with cost=115 (threshold=250) 
mpd_qdivmod
inline
            
mpd_setspecial inlined into mpd_qdivmod 
mpd_qdivmod
3942
            *status |= MPD_Invalid_operation;
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
mpd_qdivmod
3943
            return;
3944
        }
3945
        if (mpd_isinfinite(b)) {
3946
            if (!mpd_qcopy(r, a, status)) {
inline
                 
mpd_qcopy can be inlined into mpd_qdivmod with cost=215 (threshold=250) 
mpd_qdivmod
inline
                 
mpd_qcopy inlined into mpd_qdivmod 
mpd_qdivmod
3947
                mpd_seterror(q, MPD_Malloc_error, status);
inline
                
mpd_seterror can be inlined into mpd_qdivmod with cost=130 (threshold=250) 
mpd_qdivmod
inline
                
mpd_seterror inlined into mpd_qdivmod 
mpd_qdivmod
3948
                return;
3949
            }
3950
            mpd_qfinalize(r, ctx, status);
inline
            
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qdivmod
inline
            
mpd_qfinalize will not be inlined into mpd_qdivmod 
mpd_qdivmod
3951
            _settriple(q, sign, 0, 0);
inline
            
_settriple can be inlined into mpd_qdivmod with cost=180 (threshold=250) 
mpd_qdivmod
inline
            
_settriple inlined into mpd_qdivmod 
mpd_qdivmod
3952
            return;
3953
        }
3954
        /* debug */
3955
        abort(); /* GCOV_NOT_REACHED */
inline
        
abort will not be inlined into mpd_qdivmod because its definition is unavailable 
mpd_qdivmod
3956
    }
3957
    if (mpd_iszerocoeff(b)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qdivmod
inline
        
mpd_iszerocoeff inlined into mpd_qdivmod 
mpd_qdivmod
3958
        if (mpd_iszerocoeff(a)) {
inline
            
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qdivmod
inline
            
mpd_iszerocoeff inlined into mpd_qdivmod 
mpd_qdivmod
3959
            mpd_setspecial(q, MPD_POS, MPD_NAN);
inline
            
mpd_setspecial can be inlined into mpd_qdivmod with cost=115 (threshold=250) 
mpd_qdivmod
inline
            
mpd_setspecial inlined into mpd_qdivmod 
mpd_qdivmod
3960
            mpd_setspecial(r, MPD_POS, MPD_NAN);
inline
            
mpd_setspecial can be inlined into mpd_qdivmod with cost=115 (threshold=250) 
mpd_qdivmod
inline
            
mpd_setspecial inlined into mpd_qdivmod 
mpd_qdivmod
3961
            *status |= MPD_Division_undefined;
3962
        }
3963
        else {
3964
            mpd_setspecial(q, sign, MPD_INF);
inline
            
mpd_setspecial can be inlined into mpd_qdivmod with cost=120 (threshold=250) 
mpd_qdivmod
inline
            
mpd_setspecial inlined into mpd_qdivmod 
mpd_qdivmod
3965
            mpd_setspecial(r, MPD_POS, MPD_NAN);
inline
            
mpd_setspecial can be inlined into mpd_qdivmod with cost=115 (threshold=250) 
mpd_qdivmod
inline
            
mpd_setspecial inlined into mpd_qdivmod 
mpd_qdivmod
3966
            *status |= (MPD_Division_by_zero|MPD_Invalid_operation);
3967
        }
3968
        return;
3969
    }
3970
3971
    _mpd_qdivmod(q, r, a, b, ctx, status);
inline
    
_mpd_qdivmod too costly to inline (cost=630, threshold=625) 
mpd_qdivmod
inline
    
_mpd_qdivmod will not be inlined into mpd_qdivmod 
mpd_qdivmod
3972
    mpd_qfinalize(q, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qdivmod
inline
    
mpd_qfinalize will not be inlined into mpd_qdivmod 
mpd_qdivmod
3973
    mpd_qfinalize(r, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qdivmod
inline
    
mpd_qfinalize will not be inlined into mpd_qdivmod 
mpd_qdivmod
3974
}
3975
3976
void
3977
mpd_qdivint(mpd_t *q, const mpd_t *a, const mpd_t *b,
3978
            const mpd_context_t *ctx, uint32_t *status)
3979
{
3980
    MPD_NEW_STATIC(r,0,0,0,0);
3981
    uint8_t sign = mpd_sign(a)^mpd_sign(b);
inline
                   
mpd_sign should always be inlined (cost=always) 
mpd_qdivint
inline
                   
mpd_sign inlined into mpd_qdivint 
mpd_qdivint
inline
                               
mpd_sign should always be inlined (cost=always) 
mpd_qdivint
inline
                               
mpd_sign inlined into mpd_qdivint 
mpd_qdivint
3982
3983
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qdivint
inline
        
mpd_isspecial inlined into mpd_qdivint 
mpd_qdivint
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qdivint
inline
                            
mpd_isspecial inlined into mpd_qdivint 
mpd_qdivint
3984
        if (mpd_qcheck_nans(q, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qdivint
inline
            
mpd_qcheck_nans will not be inlined into mpd_qdivint 
mpd_qdivint
3985
            return;
3986
        }
3987
        if (mpd_isinfinite(a) && mpd_isinfinite(b)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
mpd_qdivint
inline
            
mpd_isinfinite inlined into mpd_qdivint 
mpd_qdivint
inline
                                 
mpd_isinfinite should always be inlined (cost=always) 
mpd_qdivint
inline
                                 
mpd_isinfinite inlined into mpd_qdivint 
mpd_qdivint
3988
            mpd_seterror(q, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into mpd_qdivint with cost=130 (threshold=250) 
mpd_qdivint
inline
            
mpd_seterror inlined into mpd_qdivint 
mpd_qdivint
3989
            return;
3990
        }
3991
        if (mpd_isinfinite(a)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
mpd_qdivint
inline
            
mpd_isinfinite inlined into mpd_qdivint 
mpd_qdivint
3992
            mpd_setspecial(q, sign, MPD_INF);
inline
            
mpd_setspecial can be inlined into mpd_qdivint with cost=120 (threshold=250) 
mpd_qdivint
inline
            
mpd_setspecial inlined into mpd_qdivint 
mpd_qdivint
3993
            return;
3994
        }
3995
        if (mpd_isinfinite(b)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
mpd_qdivint
inline
            
mpd_isinfinite inlined into mpd_qdivint 
mpd_qdivint
3996
            _settriple(q, sign, 0, 0);
inline
            
_settriple can be inlined into mpd_qdivint with cost=180 (threshold=250) 
mpd_qdivint
inline
            
_settriple inlined into mpd_qdivint 
mpd_qdivint
3997
            return;
3998
        }
3999
        /* debug */
4000
        abort(); /* GCOV_NOT_REACHED */
inline
        
abort will not be inlined into mpd_qdivint because its definition is unavailable 
mpd_qdivint
4001
    }
4002
    if (mpd_iszerocoeff(b)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qdivint
inline
        
mpd_iszerocoeff inlined into mpd_qdivint 
mpd_qdivint
4003
        if (mpd_iszerocoeff(a)) {
inline
            
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qdivint
inline
            
mpd_iszerocoeff inlined into mpd_qdivint 
mpd_qdivint
4004
            mpd_seterror(q, MPD_Division_undefined, status);
inline
            
mpd_seterror can be inlined into mpd_qdivint with cost=130 (threshold=250) 
mpd_qdivint
inline
            
mpd_seterror inlined into mpd_qdivint 
mpd_qdivint
4005
        }
4006
        else {
4007
            mpd_setspecial(q, sign, MPD_INF);
inline
            
mpd_setspecial can be inlined into mpd_qdivint with cost=120 (threshold=250) 
mpd_qdivint
inline
            
mpd_setspecial inlined into mpd_qdivint 
mpd_qdivint
4008
            *status |= MPD_Division_by_zero;
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
mpd_qdivint
4009
        }
4010
        return;
4011
    }
4012
4013
4014
    _mpd_qdivmod(q, &r, a, b, ctx, status);
inline
    
_mpd_qdivmod too costly to inline (cost=630, threshold=625) 
mpd_qdivint
inline
    
_mpd_qdivmod will not be inlined into mpd_qdivint 
mpd_qdivint
4015
    mpd_del(&r);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qdivint
inline
    
mpd_del inlined into mpd_qdivint 
mpd_qdivint
4016
    mpd_qfinalize(q, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qdivint
inline
    
mpd_qfinalize will not be inlined into mpd_qdivint 
mpd_qdivint
4017
}
4018
4019
/* Divide decimal by mpd_ssize_t. */
4020
void
4021
mpd_qdiv_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
4022
               const mpd_context_t *ctx, uint32_t *status)
4023
{
4024
    mpd_context_t maxcontext;
4025
    MPD_NEW_STATIC(bb,0,0,0,0);
4026
4027
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qdiv_ssize because its definition is unavailable 
mpd_qdiv_ssize
4028
    mpd_qsset_ssize(&bb, b, &maxcontext, status);
inline
    
mpd_qsset_ssize can be inlined into mpd_qdiv_ssize with cost=65 (threshold=375) 
mpd_qdiv_ssize
inline
    
mpd_qsset_ssize inlined into mpd_qdiv_ssize 
mpd_qdiv_ssize
4029
    mpd_qdiv(result, a, &bb, ctx, status);
inline
    
mpd_qdiv can be inlined into mpd_qdiv_ssize with cost=5 (threshold=375) 
mpd_qdiv_ssize
inline
    
mpd_qdiv inlined into mpd_qdiv_ssize 
mpd_qdiv_ssize
4030
    mpd_del(&bb);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qdiv_ssize
inline
    
mpd_del inlined into mpd_qdiv_ssize 
mpd_qdiv_ssize
4031
}
4032
4033
/* Divide decimal by mpd_uint_t. */
4034
void
4035
mpd_qdiv_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
4036
              const mpd_context_t *ctx, uint32_t *status)
4037
{
4038
    mpd_context_t maxcontext;
4039
    MPD_NEW_STATIC(bb,0,0,0,0);
4040
4041
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qdiv_uint because its definition is unavailable 
mpd_qdiv_uint
4042
    mpd_qsset_uint(&bb, b, &maxcontext, status);
inline
    
mpd_qsset_uint can be inlined into mpd_qdiv_uint with cost=45 (threshold=375) 
mpd_qdiv_uint
inline
    
mpd_qsset_uint inlined into mpd_qdiv_uint 
mpd_qdiv_uint
4043
    mpd_qdiv(result, a, &bb, ctx, status);
inline
    
mpd_qdiv can be inlined into mpd_qdiv_uint with cost=5 (threshold=375) 
mpd_qdiv_uint
inline
    
mpd_qdiv inlined into mpd_qdiv_uint 
mpd_qdiv_uint
4044
    mpd_del(&bb);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qdiv_uint
inline
    
mpd_del inlined into mpd_qdiv_uint 
mpd_qdiv_uint
4045
}
4046
4047
/* Divide decimal by int32_t. */
4048
void
4049
mpd_qdiv_i32(mpd_t *result, const mpd_t *a, int32_t b,
4050
             const mpd_context_t *ctx, uint32_t *status)
4051
{
4052
    mpd_qdiv_ssize(result, a, b, ctx, status);
inline
    
mpd_qdiv_ssize too costly to inline (cost=260, threshold=250) 
mpd_qdiv_i32
inline
    
mpd_qdiv_ssize will not be inlined into mpd_qdiv_i32 
mpd_qdiv_i32
4053
}
4054
4055
/* Divide decimal by uint32_t. */
4056
void
4057
mpd_qdiv_u32(mpd_t *result, const mpd_t *a, uint32_t b,
4058
             const mpd_context_t *ctx, uint32_t *status)
4059
{
4060
    mpd_qdiv_uint(result, a, b, ctx, status);
inline
    
mpd_qdiv_uint can be inlined into mpd_qdiv_u32 with cost=240 (threshold=250) 
mpd_qdiv_u32
inline
    
mpd_qdiv_uint inlined into mpd_qdiv_u32 
mpd_qdiv_u32
4061
}
4062
4063
#ifdef CONFIG_64
4064
/* Divide decimal by int64_t. */
4065
void
4066
mpd_qdiv_i64(mpd_t *result, const mpd_t *a, int64_t b,
4067
             const mpd_context_t *ctx, uint32_t *status)
4068
{
4069
    mpd_qdiv_ssize(result, a, b, ctx, status);
inline
    
mpd_qdiv_ssize too costly to inline (cost=260, threshold=250) 
mpd_qdiv_i64
inline
    
mpd_qdiv_ssize will not be inlined into mpd_qdiv_i64 
mpd_qdiv_i64
4070
}
4071
4072
/* Divide decimal by uint64_t. */
4073
void
4074
mpd_qdiv_u64(mpd_t *result, const mpd_t *a, uint64_t b,
4075
             const mpd_context_t *ctx, uint32_t *status)
4076
{
4077
    mpd_qdiv_uint(result, a, b, ctx, status);
inline
    
mpd_qdiv_uint can be inlined into mpd_qdiv_u64 with cost=240 (threshold=250) 
mpd_qdiv_u64
inline
    
mpd_qdiv_uint inlined into mpd_qdiv_u64 
mpd_qdiv_u64
4078
}
4079
#elif !defined(LEGACY_COMPILER)
4080
/* Divide decimal by int64_t. */
4081
void
4082
mpd_qdiv_i64(mpd_t *result, const mpd_t *a, int64_t b,
4083
             const mpd_context_t *ctx, uint32_t *status)
4084
{
4085
    mpd_context_t maxcontext;
4086
    MPD_NEW_STATIC(bb,0,0,0,0);
4087
4088
    mpd_maxcontext(&maxcontext);
4089
    mpd_qset_i64(&bb, b, &maxcontext, status);
4090
    mpd_qdiv(result, a, &bb, ctx, status);
4091
    mpd_del(&bb);
4092
}
4093
4094
/* Divide decimal by uint64_t. */
4095
void
4096
mpd_qdiv_u64(mpd_t *result, const mpd_t *a, uint64_t b,
4097
             const mpd_context_t *ctx, uint32_t *status)
4098
{
4099
    mpd_context_t maxcontext;
4100
    MPD_NEW_STATIC(bb,0,0,0,0);
4101
4102
    mpd_maxcontext(&maxcontext);
4103
    mpd_qset_u64(&bb, b, &maxcontext, status);
4104
    mpd_qdiv(result, a, &bb, ctx, status);
4105
    mpd_del(&bb);
4106
}
4107
#endif
4108
4109
/* Pad the result with trailing zeros if it has fewer digits than prec. */
4110
static void
4111
_mpd_zeropad(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
4112
{
4113
    if (!mpd_isspecial(result) && !mpd_iszero(result) &&
inline
         
mpd_isspecial should always be inlined (cost=always) 
_mpd_zeropad
inline
         
mpd_isspecial inlined into _mpd_zeropad 
_mpd_zeropad
inline
                                   
mpd_iszero should always be inlined (cost=always) 
_mpd_zeropad
inline
                                   
mpd_iszero inlined into _mpd_zeropad 
_mpd_zeropad
4114
        result->digits < ctx->prec) {
gvn
                
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qexp
4115
       mpd_ssize_t shift = ctx->prec - result->digits;
4116
       mpd_qshiftl(result, result, shift, status);
inline
       
mpd_qshiftl too costly to inline (cost=320, threshold=250) 
_mpd_zeropad
inline
       
mpd_qshiftl will not be inlined into _mpd_zeropad 
_mpd_zeropad
inline
       
mpd_qshiftl too costly to inline (cost=320, threshold=250) 
mpd_qexp
inline
       
mpd_qshiftl will not be inlined into mpd_qexp 
mpd_qexp
4117
       result->exp -= shift;
gvn
                   
load of type i64 not eliminated because it is clobbered by call 
_mpd_zeropad
gvn
                   
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qexp
4118
    }
4119
}
4120
4121
/* Check if the result is guaranteed to be one. */
4122
static int
4123
_mpd_qexp_check_one(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
4124
                    uint32_t *status)
4125
{
4126
    MPD_NEW_CONST(lim,0,-(ctx->prec+1),1,1,1,9);
4127
    MPD_NEW_SHARED(aa, a);
gvn
    
load of type i64 eliminated in favor of load 
_mpd_qexp
4128
4129
    mpd_set_positive(&aa);
inline
    
mpd_set_positive should always be inlined (cost=always) 
_mpd_qexp_check_one
inline
    
mpd_set_positive inlined into _mpd_qexp_check_one 
_mpd_qexp_check_one
4130
4131
    /* abs(a) <= 9 * 10**(-prec-1) */
4132
    if (_mpd_cmp(&aa, &lim) <= 0) {
inline
        
_mpd_cmp too costly to inline (cost=525, threshold=250) 
_mpd_qexp_check_one
inline
        
_mpd_cmp will not be inlined into _mpd_qexp_check_one 
_mpd_qexp_check_one
inline
        
_mpd_cmp too costly to inline (cost=525, threshold=250) 
_mpd_qexp
inline
        
_mpd_cmp will not be inlined into _mpd_qexp 
_mpd_qexp
4133
        _settriple(result, 0, 1, 0);
inline
        
_settriple can be inlined into _mpd_qexp_check_one with cost=180 (threshold=250) 
_mpd_qexp_check_one
inline
        
_settriple inlined into _mpd_qexp_check_one 
_mpd_qexp_check_one
4134
        *status |= MPD_Rounded|MPD_Inexact;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
_mpd_qexp_check_one
gvn
                
load of type i32 not eliminated because it is clobbered by call 
_mpd_qexp
4135
        return 1;
4136
    }
4137
4138
    return 0;
4139
}
4140
4141
/*
4142
 * Get the number of iterations for the Horner scheme in _mpd_qexp().
4143
 */
4144
static inline mpd_ssize_t
4145
_mpd_get_exp_iterations(const mpd_t *r, mpd_ssize_t p)
4146
{
4147
    mpd_ssize_t log10pbyr; /* lower bound for log10(p / abs(r)) */
4148
    mpd_ssize_t n;
4149
4150
    assert(p >= 10);
4151
    assert(!mpd_iszero(r));
4152
    assert(-p < mpd_adjexp(r) && mpd_adjexp(r) <= -1);
4153
4154
#ifdef CONFIG_64
4155
    if (p > (mpd_ssize_t)(1ULL<<52)) {
4156
        return MPD_SSIZE_MAX;
4157
    }
4158
#endif
4159
4160
    /*
4161
     * Lower bound for log10(p / abs(r)): adjexp(p) - (adjexp(r) + 1)
4162
     * At this point (for CONFIG_64, CONFIG_32 is not problematic):
4163
     *    1) 10 <= p <= 2**52
4164
     *    2) -p < adjexp(r) <= -1
4165
     *    3) 1 <= log10pbyr <= 2**52 + 14
4166
     */
4167
    log10pbyr = (mpd_word_digits(p)-1) - (mpd_adjexp(r)+1);
inline
                 
mpd_word_digits should always be inlined (cost=always) 
_mpd_get_exp_iterations
inline
                 
mpd_word_digits inlined into _mpd_get_exp_iterations 
_mpd_get_exp_iterations
inline
                                          
mpd_adjexp should always be inlined (cost=always) 
_mpd_get_exp_iterations
inline
                                          
mpd_adjexp inlined into _mpd_get_exp_iterations 
_mpd_get_exp_iterations
4168
4169
    /*
4170
     * The numerator in the paper is 1.435 * p - 1.182, calculated
4171
     * exactly. We compensate for rounding errors by using 1.43503.
4172
     * ACL2 proofs:
4173
     *    1) exp-iter-approx-lower-bound: The term below evaluated
4174
     *       in 53-bit floating point arithmetic is greater than or
4175
     *       equal to the exact term used in the paper.
4176
     *    2) exp-iter-approx-upper-bound: The term below is less than
4177
     *       or equal to 3/2 * p <= 3/2 * 2**52.
4178
     */
4179
    n = (mpd_ssize_t)ceil((1.43503*(double)p - 1.182) / (double)log10pbyr);
inline
                     
ceil will not be inlined into _mpd_get_exp_iterations because its definition is unavailable 
_mpd_get_exp_iterations
4180
    return n >= 3 ? n : 3;
4181
}
4182
4183
/*
4184
 * Internal function, specials have been dealt with. Apart from Overflow
4185
 * and Underflow, two cases must be considered for the error of the result:
4186
 *
4187
 *   1) abs(a) <= 9 * 10**(-prec-1)  ==>  result == 1
4188
 *
4189
 *      Absolute error: abs(1 - e**x) < 10**(-prec)
4190
 *      -------------------------------------------
4191
 *
4192
 *   2) abs(a) > 9 * 10**(-prec-1)
4193
 *
4194
 *      Relative error: abs(result - e**x) < 0.5 * 10**(-prec) * e**x
4195
 *      -------------------------------------------------------------
4196
 *
4197
 * The algorithm is from Hull&Abrham, Variable Precision Exponential Function,
4198
 * ACM Transactions on Mathematical Software, Vol. 12, No. 2, June 1986.
4199
 *
4200
 * Main differences:
4201
 *
4202
 *  - The number of iterations for the Horner scheme is calculated using
4203
 *    53-bit floating point arithmetic.
4204
 *
4205
 *  - In the error analysis for ER (relative error accumulated in the
4206
 *    evaluation of the truncated series) the reduced operand r may
4207
 *    have any number of digits.
4208
 *    ACL2 proof: exponent-relative-error
4209
 *
4210
 *  - The analysis for early abortion has been adapted for the mpd_t
4211
 *    ranges.
4212
 */
4213
static void
4214
_mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
4215
          uint32_t *status)
4216
{
4217
    mpd_context_t workctx;
4218
    MPD_NEW_STATIC(tmp,0,0,0,0);
4219
    MPD_NEW_STATIC(sum,0,0,0,0);
4220
    MPD_NEW_CONST(word,0,0,1,1,1,1);
4221
    mpd_ssize_t j, n, t;
4222
4223
    assert(!mpd_isspecial(a));
4224
4225
    if (mpd_iszerocoeff(a)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_qexp
inline
        
mpd_iszerocoeff inlined into _mpd_qexp 
_mpd_qexp
4226
        _settriple(result, MPD_POS, 1, 0);
inline
        
_settriple can be inlined into _mpd_qexp with cost=180 (threshold=250) 
_mpd_qexp
inline
        
_settriple inlined into _mpd_qexp 
_mpd_qexp
4227
        return;
4228
    }
4229
4230
    /*
4231
     * We are calculating e^x = e^(r*10^t) = (e^r)^(10^t), where abs(r) < 1 and t >= 0.
4232
     *
4233
     * If t > 0, we have:
4234
     *
4235
     *   (1) 0.1 <= r < 1, so e^0.1 <= e^r. If t > MAX_T, overflow occurs:
4236
     *
4237
     *     MAX-EMAX+1 < log10(e^(0.1*10*t)) <= log10(e^(r*10^t)) < adjexp(e^(r*10^t))+1
4238
     *
4239
     *   (2) -1 < r <= -0.1, so e^r <= e^-0.1. If t > MAX_T, underflow occurs:
4240
     *
4241
     *     adjexp(e^(r*10^t)) <= log10(e^(r*10^t)) <= log10(e^(-0.1*10^t)) < MIN-ETINY
4242
     */
4243
#if defined(CONFIG_64)
4244
    #define MPD_EXP_MAX_T 19
4245
#elif defined(CONFIG_32)
4246
    #define MPD_EXP_MAX_T 10
4247
#endif
4248
    t = a->digits + a->exp;
4249
    t = (t > 0) ? t : 0;
4250
    if (t > MPD_EXP_MAX_T) {
4251
        if (mpd_ispositive(a)) {
inline
            
mpd_ispositive should always be inlined (cost=always) 
_mpd_qexp
inline
            
mpd_ispositive inlined into _mpd_qexp 
_mpd_qexp
4252
            mpd_setspecial(result, MPD_POS, MPD_INF);
inline
            
mpd_setspecial can be inlined into _mpd_qexp with cost=115 (threshold=250) 
_mpd_qexp
inline
            
mpd_setspecial inlined into _mpd_qexp 
_mpd_qexp
4253
            *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
4254
        }
4255
        else {
4256
            _settriple(result, MPD_POS, 0, mpd_etiny(ctx));
inline
                                           
mpd_etiny should always be inlined (cost=always) 
_mpd_qexp
inline
                                           
mpd_etiny inlined into _mpd_qexp 
_mpd_qexp
inline
            
_settriple can be inlined into _mpd_qexp with cost=180 (threshold=250) 
_mpd_qexp
inline
            
_settriple inlined into _mpd_qexp 
_mpd_qexp
4257
            *status |= (MPD_Inexact|MPD_Rounded|MPD_Subnormal|
4258
                        MPD_Underflow|MPD_Clamped);
4259
        }
4260
        return;
4261
    }
4262
4263
    /* abs(a) <= 9 * 10**(-prec-1) */
4264
    if (_mpd_qexp_check_one(result, a, ctx, status)) {
inline
        
_mpd_qexp_check_one can be inlined into _mpd_qexp with cost=-14390 (threshold=250) 
_mpd_qexp
inline
        
_mpd_qexp_check_one inlined into _mpd_qexp 
_mpd_qexp
4265
        return;
4266
    }
4267
4268
    mpd_maxcontext(&workctx);
inline
    
mpd_maxcontext will not be inlined into _mpd_qexp because its definition is unavailable 
_mpd_qexp
4269
    workctx.prec = ctx->prec + t + 2;
gvn
                        
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_qexp
4270
    workctx.prec = (workctx.prec < 10) ? 10 : workctx.prec;
4271
    workctx.round = MPD_ROUND_HALF_EVEN;
4272
4273
    if (!mpd_qcopy(result, a, status)) {
inline
         
mpd_qcopy can be inlined into _mpd_qexp with cost=215 (threshold=250) 
_mpd_qexp
inline
         
mpd_qcopy inlined into _mpd_qexp 
_mpd_qexp
4274
        return;
4275
    }
4276
    result->exp -= t;
gvn
                
load of type i64 not eliminated because it is clobbered by call 
_mpd_qexp
4277
4278
    /*
4279
     * At this point:
4280
     *    1) 9 * 10**(-prec-1) < abs(a)
4281
     *    2) 9 * 10**(-prec-t-1) < abs(r)
4282
     *    3) log10(9) - prec - t - 1 < log10(abs(r)) < adjexp(abs(r)) + 1
4283
     *    4) - prec - t - 2 < adjexp(abs(r)) <= -1
4284
     */
4285
    n = _mpd_get_exp_iterations(result, workctx.prec);
inline
        
_mpd_get_exp_iterations can be inlined into _mpd_qexp with cost=-14675 (threshold=325) 
_mpd_qexp
inline
        
_mpd_get_exp_iterations inlined into _mpd_qexp 
_mpd_qexp
gvn
                                                
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qexp
gvn
                                                
load eliminated by PRE 
_mpd_qexp
4286
    if (n == MPD_SSIZE_MAX) {
4287
        mpd_seterror(result, MPD_Invalid_operation, status); /* GCOV_UNLIKELY */
inline
        
mpd_seterror can be inlined into _mpd_qexp with cost=130 (threshold=250) 
_mpd_qexp
inline
        
mpd_seterror inlined into _mpd_qexp 
_mpd_qexp
4288
        return; /* GCOV_UNLIKELY */
4289
    }
4290
4291
    _settriple(&sum, MPD_POS, 1, 0);
inline
    
_settriple can be inlined into _mpd_qexp with cost=180 (threshold=250) 
_mpd_qexp
inline
    
_settriple inlined into _mpd_qexp 
_mpd_qexp
4292
4293
    for (j = n-1; j >= 1; j--) {
loop-vectorize
    
loop not vectorized 
_mpd_qexp
4294
        word.data[0] = j;
licm
             
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qexp
gvn
             
load of type i64* not eliminated in favor of store because it is clobbered by call 
_mpd_qexp
4295
        mpd_setdigits(&word);
inline
        
mpd_setdigits can be inlined into _mpd_qexp with cost=280 (threshold=325) 
_mpd_qexp
inline
        
mpd_setdigits inlined into _mpd_qexp 
_mpd_qexp
4296
        mpd_qdiv(&tmp, result, &word, &workctx, &workctx.status);
inline
        
mpd_qdiv can be inlined into _mpd_qexp with cost=5 (threshold=375) 
_mpd_qexp
inline
        
mpd_qdiv inlined into _mpd_qexp 
_mpd_qexp
licm
                                                         
hosting getelementptr 
_mpd_qexp
4297
        mpd_qfma(&sum, &sum, &tmp, &one, &workctx, &workctx.status);
inline
        
mpd_qfma too costly to inline (cost=485, threshold=250) 
_mpd_qexp
inline
        
mpd_qfma will not be inlined into _mpd_qexp 
_mpd_qexp
4298
    }
4299
4300
#ifdef CONFIG_64
4301
    _mpd_qpow_uint(result, &sum, mpd_pow10[t], MPD_POS, &workctx, status);
inline
    
_mpd_qpow_uint too costly to inline (cost=840, threshold=812) 
_mpd_qexp
inline
    
_mpd_qpow_uint will not be inlined into _mpd_qexp 
_mpd_qexp
4302
#else
4303
    if (t <= MPD_MAX_POW10) {
4304
        _mpd_qpow_uint(result, &sum, mpd_pow10[t], MPD_POS, &workctx, status);
4305
    }
4306
    else {
4307
        t -= MPD_MAX_POW10;
4308
        _mpd_qpow_uint(&tmp, &sum, mpd_pow10[MPD_MAX_POW10], MPD_POS,
4309
                       &workctx, status);
4310
        _mpd_qpow_uint(result, &tmp, mpd_pow10[t], MPD_POS, &workctx, status);
4311
    }
4312
#endif
4313
4314
    mpd_del(&tmp);
inline
    
mpd_del should always be inlined (cost=always) 
_mpd_qexp
inline
    
mpd_del inlined into _mpd_qexp 
_mpd_qexp
4315
    mpd_del(&sum);
inline
    
mpd_del should always be inlined (cost=always) 
_mpd_qexp
inline
    
mpd_del inlined into _mpd_qexp 
_mpd_qexp
4316
    *status |= (workctx.status&MPD_Errors);
gvn
                        
load of type i32 not eliminated because it is clobbered by call 
_mpd_qexp
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qexp
4317
    *status |= (MPD_Inexact|MPD_Rounded);
4318
}
4319
4320
/* exp(a) */
4321
void
4322
mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
4323
         uint32_t *status)
4324
{
4325
    mpd_context_t workctx;
4326
4327
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qexp
inline
        
mpd_isspecial inlined into mpd_qexp 
mpd_qexp
4328
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qexp
inline
            
mpd_qcheck_nan will not be inlined into mpd_qexp 
mpd_qexp
4329
            return;
4330
        }
4331
        if (mpd_isnegative(a)) {
inline
            
mpd_isnegative should always be inlined (cost=always) 
mpd_qexp
inline
            
mpd_isnegative inlined into mpd_qexp 
mpd_qexp
4332
            _settriple(result, MPD_POS, 0, 0);
inline
            
_settriple can be inlined into mpd_qexp with cost=180 (threshold=250) 
mpd_qexp
inline
            
_settriple inlined into mpd_qexp 
mpd_qexp
4333
        }
4334
        else {
4335
            mpd_setspecial(result, MPD_POS, MPD_INF);
inline
            
mpd_setspecial can be inlined into mpd_qexp with cost=115 (threshold=250) 
mpd_qexp
inline
            
mpd_setspecial inlined into mpd_qexp 
mpd_qexp
4336
        }
4337
        return;
4338
    }
4339
    if (mpd_iszerocoeff(a)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qexp
inline
        
mpd_iszerocoeff inlined into mpd_qexp 
mpd_qexp
4340
        _settriple(result, MPD_POS, 1, 0);
inline
        
_settriple can be inlined into mpd_qexp with cost=180 (threshold=250) 
mpd_qexp
inline
        
_settriple inlined into mpd_qexp 
mpd_qexp
4341
        return;
4342
    }
4343
4344
    workctx = *ctx;
4345
    workctx.round = MPD_ROUND_HALF_EVEN;
4346
4347
    if (ctx->allcr) {
4348
        MPD_NEW_STATIC(t1, 0,0,0,0);
4349
        MPD_NEW_STATIC(t2, 0,0,0,0);
4350
        MPD_NEW_STATIC(ulp, 0,0,0,0);
4351
        MPD_NEW_STATIC(aa, 0,0,0,0);
4352
        mpd_ssize_t prec;
4353
        mpd_ssize_t ulpexp;
4354
        uint32_t workstatus;
4355
4356
        if (result == a) {
4357
            if (!mpd_qcopy(&aa, a, status)) {
inline
                 
mpd_qcopy can be inlined into mpd_qexp with cost=215 (threshold=250) 
mpd_qexp
inline
                 
mpd_qcopy inlined into mpd_qexp 
mpd_qexp
4358
                mpd_seterror(result, MPD_Malloc_error, status);
inline
                
mpd_seterror can be inlined into mpd_qexp with cost=130 (threshold=250) 
mpd_qexp
inline
                
mpd_seterror inlined into mpd_qexp 
mpd_qexp
4359
                return;
4360
            }
4361
            a = &aa;
4362
        }
4363
4364
        workctx.clamp = 0;
4365
        prec = ctx->prec + 3;
gvn
                    
load of type i64 not eliminated because it is clobbered by call 
mpd_qexp
4366
        while (1) {
loop-vectorize
        
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qexp
loop-vectorize
        
loop not vectorized 
mpd_qexp
4367
            workctx.prec = prec;
licm
                    
hosting getelementptr 
mpd_qexp
4368
            workstatus = 0;
4369
4370
            _mpd_qexp(result, a, &workctx, &workstatus);
inline
            
_mpd_qexp too costly to inline (cost=645, threshold=625) 
mpd_qexp
inline
            
_mpd_qexp will not be inlined into mpd_qexp 
mpd_qexp
4371
            *status |= workstatus;
licm
                       
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
licm
                    
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
gvn
                       
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qexp
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
mpd_qexp
4372
4373
            ulpexp = result->exp + result->digits - workctx.prec;
licm
                             
hosting getelementptr 
mpd_qexp
licm
                             
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
licm
                                           
hosting getelementptr 
mpd_qexp
licm
                                           
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
licm
                                                            
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
gvn
                             
load of type i64 not eliminated because it is clobbered by call 
mpd_qexp
gvn
                                           
load of type i64 not eliminated because it is clobbered by call 
mpd_qexp
gvn
                                                            
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qexp
4374
            if (workstatus & MPD_Underflow) {
licm
                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
gvn
                
load of type i32 eliminated in favor of load 
mpd_qexp
4375
                /* The effective work precision is result->digits. */
4376
                ulpexp = result->exp;
4377
            }
4378
            _ssettriple(&ulp, MPD_POS, 1, ulpexp);
inline
            
_ssettriple too costly to inline (cost=300, threshold=250) 
mpd_qexp
inline
            
_ssettriple will not be inlined into mpd_qexp 
mpd_qexp
4379
4380
            /*
4381
             * At this point [1]:
4382
             *   1) abs(result - e**x) < 0.5 * 10**(-prec) * e**x
4383
             *   2) result - ulp < e**x < result + ulp
4384
             *   3) result - ulp < result < result + ulp
4385
             *
4386
             * If round(result-ulp)==round(result+ulp), then
4387
             * round(result)==round(e**x). Therefore the result
4388
             * is correctly rounded.
4389
             *
4390
             * [1] If abs(a) <= 9 * 10**(-prec-1), use the absolute
4391
             *     error for a similar argument.
4392
             */
4393
            workctx.prec = ctx->prec;
licm
                                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexp
gvn
                                
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qexp
4394
            mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
inline
            
mpd_qadd too costly to inline (cost=660, threshold=625) 
mpd_qexp
inline
            
mpd_qadd will not be inlined into mpd_qexp 
mpd_qexp
licm
                                                           
hosting getelementptr 
mpd_qexp
4395
            mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
inline
            
mpd_qsub too costly to inline (cost=670, threshold=625) 
mpd_qexp
inline
            
mpd_qsub will not be inlined into mpd_qexp 
mpd_qexp
4396
            if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
inline
                
mpd_isspecial should always be inlined (cost=always) 
mpd_qexp
inline
                
mpd_isspecial inlined into mpd_qexp 
mpd_qexp
inline
                                         
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qexp
inline
                                         
mpd_iszerocoeff inlined into mpd_qexp 
mpd_qexp
4397
                mpd_qcmp(&t1, &t2, status) == 0) {
inline
                
mpd_qcmp can be inlined into mpd_qexp with cost=85 (threshold=250) 
mpd_qexp
inline
                
mpd_qcmp inlined into mpd_qexp 
mpd_qexp
4398
                workctx.clamp = ctx->clamp;
gvn
                                     
load of type i32 not eliminated because it is clobbered by call 
mpd_qexp
4399
                _mpd_zeropad(result, &workctx, status);
inline
                
_mpd_zeropad can be inlined into mpd_qexp with cost=-14905 (threshold=250) 
mpd_qexp
inline
                
_mpd_zeropad inlined into mpd_qexp 
mpd_qexp
4400
                mpd_check_underflow(result, &workctx, status);
inline
                
mpd_check_underflow can be inlined into mpd_qexp with cost=70 (threshold=325) 
mpd_qexp
inline
                
mpd_check_underflow inlined into mpd_qexp 
mpd_qexp
4401
                mpd_qfinalize(result, &workctx, status);
inline
                
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qexp
inline
                
mpd_qfinalize will not be inlined into mpd_qexp 
mpd_qexp
4402
                break;
4403
            }
4404
            prec += MPD_RDIGITS;
4405
        }
4406
        mpd_del(&t1);
inline
        
mpd_del should always be inlined (cost=always) 
mpd_qexp
inline
        
mpd_del inlined into mpd_qexp 
mpd_qexp
4407
        mpd_del(&t2);
inline
        
mpd_del should always be inlined (cost=always) 
mpd_qexp
inline
        
mpd_del inlined into mpd_qexp 
mpd_qexp
4408
        mpd_del(&ulp);
inline
        
mpd_del should always be inlined (cost=always) 
mpd_qexp
inline
        
mpd_del inlined into mpd_qexp 
mpd_qexp
4409
        mpd_del(&aa);
inline
        
mpd_del should always be inlined (cost=always) 
mpd_qexp
inline
        
mpd_del inlined into mpd_qexp 
mpd_qexp
4410
    }
4411
    else {
4412
        _mpd_qexp(result, a, &workctx, status);
inline
        
_mpd_qexp too costly to inline (cost=645, threshold=625) 
mpd_qexp
inline
        
_mpd_qexp will not be inlined into mpd_qexp 
mpd_qexp
4413
        _mpd_zeropad(result, &workctx, status);
inline
        
_mpd_zeropad can be inlined into mpd_qexp with cost=95 (threshold=250) 
mpd_qexp
inline
        
_mpd_zeropad inlined into mpd_qexp 
mpd_qexp
4414
        mpd_check_underflow(result, &workctx, status);
inline
        
mpd_check_underflow can be inlined into mpd_qexp with cost=70 (threshold=325) 
mpd_qexp
inline
        
mpd_check_underflow inlined into mpd_qexp 
mpd_qexp
4415
        mpd_qfinalize(result, &workctx, status);
inline
        
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qexp
inline
        
mpd_qfinalize will not be inlined into mpd_qexp 
mpd_qexp
4416
    }
4417
}
4418
4419
/* Fused multiply-add: (a * b) + c, with a single final rounding. */
4420
void
4421
mpd_qfma(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c,
4422
         const mpd_context_t *ctx, uint32_t *status)
4423
{
4424
    uint32_t workstatus = 0;
4425
    mpd_t *cc = NULL;
4426
4427
    if (result == c) {
4428
        if ((cc = mpd_qncopy(c)) == NULL) {
inline
                  
mpd_qncopy can be inlined into mpd_qfma with cost=100 (threshold=250) 
mpd_qfma
inline
                  
mpd_qncopy inlined into mpd_qfma 
mpd_qfma
4429
            mpd_seterror(result, MPD_Malloc_error, status);
inline
            
mpd_seterror can be inlined into mpd_qfma with cost=130 (threshold=250) 
mpd_qfma
inline
            
mpd_seterror inlined into mpd_qfma 
mpd_qfma
4430
            return;
4431
        }
4432
        c = cc;
4433
    }
4434
4435
    _mpd_qmul(result, a, b, ctx, &workstatus);
inline
    
_mpd_qmul too costly to inline (cost=815, threshold=812) 
mpd_qfma
inline
    
_mpd_qmul will not be inlined into mpd_qfma 
mpd_qfma
4436
    if (!(workstatus&MPD_Invalid_operation)) {
gvn
          
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qfma
4437
        mpd_qadd(result, result, c, ctx, &workstatus);
inline
        
mpd_qadd too costly to inline (cost=660, threshold=625) 
mpd_qfma
inline
        
mpd_qadd will not be inlined into mpd_qfma 
mpd_qfma
4438
    }
4439
4440
    if (cc) mpd_del(cc);
inline
            
mpd_del should always be inlined (cost=always) 
mpd_qfma
inline
            
mpd_del inlined into mpd_qfma 
mpd_qfma
4441
    *status |= workstatus;
gvn
               
load of type i32 not eliminated because it is clobbered by call 
mpd_qfma
4442
}
4443
4444
/*
4445
 * Schedule the optimal precision increase for the Newton iteration.
4446
 *   v := input operand
4447
 *   z_0 := initial approximation
4448
 *   initprec := natural number such that abs(log(v) - z_0) < 10**-initprec
4449
 *   maxprec := target precision
4450
 *
4451
 * For convenience the output klist contains the elements in reverse order:
4452
 *   klist := [k_n-1, ..., k_0], where
4453
 *     1) k_0 <= initprec and
4454
 *     2) abs(log(v) - result) < 10**(-2*k_n-1 + 1) <= 10**-maxprec.
4455
 */
4456
static inline int
4457
ln_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2], mpd_ssize_t maxprec,
4458
                 mpd_ssize_t initprec)
4459
{
4460
    mpd_ssize_t k;
4461
    int i;
4462
4463
    assert(maxprec >= 2 && initprec >= 2);
4464
    if (maxprec <= initprec) return -1;
4465
4466
    i = 0; k = maxprec;
4467
    do {
loop-vectorize
    
loop not vectorized: could not determine number of loop iterations 
mpd_qln10
loop-vectorize
    
loop not vectorized 
mpd_qln10
loop-vectorize
    
loop not vectorized: could not determine number of loop iterations 
_mpd_qln
loop-vectorize
    
loop not vectorized 
_mpd_qln
4468
        k = (k+2) / 2;
4469
        klist[i++] = k;
4470
    } while (k > initprec);
4471
4472
    return i-1;
4473
}
4474
4475
/* The constants have been verified with both decimal.py and mpfr. */
4476
#ifdef CONFIG_64
4477
#if MPD_RDIGITS != 19
4478
  #error "mpdecimal.c: MPD_RDIGITS must be 19."
4479
#endif
4480
static const mpd_uint_t mpd_ln10_data[MPD_MINALLOC_MAX] = {
4481
  6983716328982174407ULL, 9089704281976336583ULL, 1515961135648465461ULL,
4482
  4416816335727555703ULL, 2900988039194170265ULL, 2307925037472986509ULL,
4483
   107598438319191292ULL, 3466624107184669231ULL, 4450099781311469159ULL,
4484
  9807828059751193854ULL, 7713456862091670584ULL, 1492198849978748873ULL,
4485
  6528728696511086257ULL, 2385392051446341972ULL, 8692180205189339507ULL,
4486
  6518769751037497088ULL, 2375253577097505395ULL, 9095610299291824318ULL,
4487
   982748238504564801ULL, 5438635917781170543ULL, 7547331541421808427ULL,
4488
   752371033310119785ULL, 3171643095059950878ULL, 9785265383207606726ULL,
4489
  2932258279850258550ULL, 5497347726624257094ULL, 2976979522110718264ULL,
4490
  9221477656763693866ULL, 1979650047149510504ULL, 6674183485704422507ULL,
4491
  9702766860595249671ULL, 9278096762712757753ULL, 9314848524948644871ULL,
4492
  6826928280848118428ULL,  754403708474699401ULL,  230105703089634572ULL,
4493
  1929203337658714166ULL, 7589402567763113569ULL, 4208241314695689016ULL,
4494
  2922455440575892572ULL, 9356734206705811364ULL, 2684916746550586856ULL,
4495
   644507064800027750ULL, 9476834636167921018ULL, 5659121373450747856ULL,
4496
  2835522011480466371ULL, 6470806855677432162ULL, 7141748003688084012ULL,
4497
  9619404400222105101ULL, 5504893431493939147ULL, 6674744042432743651ULL,
4498
  2287698219886746543ULL, 7773262884616336622ULL, 1985283935053089653ULL,
4499
  4680843799894826233ULL, 8168948290720832555ULL, 8067566662873690987ULL,
4500
  6248633409525465082ULL, 9829834196778404228ULL, 3524802359972050895ULL,
4501
  3327900967572609677ULL,  110148862877297603ULL,  179914546843642076ULL,
4502
  2302585092994045684ULL
4503
};
4504
#else
4505
#if MPD_RDIGITS != 9
4506
  #error "mpdecimal.c: MPD_RDIGITS must be 9."
4507
#endif
4508
static const mpd_uint_t mpd_ln10_data[MPD_MINALLOC_MAX] = {
4509
  401682692UL, 708474699UL, 720754403UL,  30896345UL, 602301057UL, 765871416UL,
4510
  192920333UL, 763113569UL, 589402567UL, 956890167UL,  82413146UL, 589257242UL,
4511
  245544057UL, 811364292UL, 734206705UL, 868569356UL, 167465505UL, 775026849UL,
4512
  706480002UL,  18064450UL, 636167921UL, 569476834UL, 734507478UL, 156591213UL,
4513
  148046637UL, 283552201UL, 677432162UL, 470806855UL, 880840126UL, 417480036UL,
4514
  210510171UL, 940440022UL, 939147961UL, 893431493UL, 436515504UL, 440424327UL,
4515
  654366747UL, 821988674UL, 622228769UL, 884616336UL, 537773262UL, 350530896UL,
4516
  319852839UL, 989482623UL, 468084379UL, 720832555UL, 168948290UL, 736909878UL,
4517
  675666628UL, 546508280UL, 863340952UL, 404228624UL, 834196778UL, 508959829UL,
4518
   23599720UL, 967735248UL,  96757260UL, 603332790UL, 862877297UL, 760110148UL,
4519
  468436420UL, 401799145UL, 299404568UL, 230258509UL
4520
};
4521
#endif
4522
/* _mpd_ln10 is used directly for precisions smaller than MINALLOC_MAX*RDIGITS.
4523
   Otherwise, it serves as the initial approximation for calculating ln(10). */
4524
static const mpd_t _mpd_ln10 = {
4525
  MPD_STATIC|MPD_CONST_DATA, -(MPD_MINALLOC_MAX*MPD_RDIGITS-1),
4526
  MPD_MINALLOC_MAX*MPD_RDIGITS, MPD_MINALLOC_MAX, MPD_MINALLOC_MAX,
4527
  (mpd_uint_t *)mpd_ln10_data
4528
};
4529
4530
/*
4531
 * Set 'result' to log(10).
4532
 *   Ulp error: abs(result - log(10)) < ulp(log(10))
4533
 *   Relative error: abs(result - log(10)) < 5 * 10**-prec * log(10)
4534
 *
4535
 * NOTE: The relative error is not derived from the ulp error, but
4536
 * calculated separately using the fact that 23/10 < log(10) < 24/10.
4537
 */
4538
void
4539
mpd_qln10(mpd_t *result, mpd_ssize_t prec, uint32_t *status)
4540
{
4541
    mpd_context_t varcontext, maxcontext;
4542
    MPD_NEW_STATIC(tmp, 0,0,0,0);
4543
    MPD_NEW_CONST(static10, 0,0,2,1,1,10);
4544
    mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
4545
    mpd_uint_t rnd;
4546
    mpd_ssize_t shift;
4547
    int i;
4548
4549
    assert(prec >= 1);
4550
4551
    shift = MPD_MINALLOC_MAX*MPD_RDIGITS-prec;
4552
    shift = shift < 0 ? 0 : shift;
4553
4554
    rnd = mpd_qshiftr(result, &_mpd_ln10, shift, status);
inline
          
mpd_qshiftr too costly to inline (cost=630, threshold=625) 
mpd_qln10
inline
          
mpd_qshiftr will not be inlined into mpd_qln10 
mpd_qln10
4555
    if (rnd == MPD_UINT_MAX) {
4556
        mpd_seterror(result, MPD_Malloc_error, status);
inline
        
mpd_seterror can be inlined into mpd_qln10 with cost=130 (threshold=250) 
mpd_qln10
inline
        
mpd_seterror inlined into mpd_qln10 
mpd_qln10
4557
        return;
4558
    }
4559
    result->exp = -(result->digits-1);
gvn
                            
load of type i64 not eliminated because it is clobbered by call 
mpd_qln10
4560
4561
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qln10 because its definition is unavailable 
mpd_qln10
4562
    if (prec < MPD_MINALLOC_MAX*MPD_RDIGITS) {
4563
        maxcontext.prec = prec;
4564
        _mpd_apply_round_excess(result, rnd, &maxcontext, status);
inline
        
_mpd_apply_round_excess too costly to inline (cost=755, threshold=325) 
mpd_qln10
inline
        
_mpd_apply_round_excess will not be inlined into mpd_qln10 
mpd_qln10
4565
        *status |= (MPD_Inexact|MPD_Rounded);
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qln10
4566
        return;
4567
    }
4568
4569
    mpd_maxcontext(&varcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qln10 because its definition is unavailable 
mpd_qln10
4570
    varcontext.round = MPD_ROUND_TRUNC;
4571
4572
    i = ln_schedule_prec(klist, prec+2, -result->exp);
inline
        
ln_schedule_prec can be inlined into mpd_qln10 with cost=5 (threshold=325) 
mpd_qln10
inline
        
ln_schedule_prec inlined into mpd_qln10 
mpd_qln10
gvn
                                                 
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qln10
4573
    for (; i >= 0; i--) {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qln10
loop-vectorize
    
loop not vectorized 
mpd_qln10
4574
        varcontext.prec = 2*klist[i]+3;
licm
                   
hosting getelementptr 
mpd_qln10
4575
        result->flags ^= MPD_NEG;
licm
                
hosting getelementptr 
mpd_qln10
licm
                      
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qln10
gvn
                      
load of type i8 not eliminated because it is clobbered by call 
mpd_qln10
gvn
                      
load eliminated by PRE 
mpd_qln10
4576
        _mpd_qexp(&tmp, result, &varcontext, status);
inline
        
_mpd_qexp too costly to inline (cost=680, threshold=625) 
mpd_qln10
inline
        
_mpd_qexp will not be inlined into mpd_qln10 
mpd_qln10
4577
        result->flags ^= MPD_NEG;
licm
                      
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qln10
gvn
                      
load of type i8 not eliminated because it is clobbered by call 
mpd_qln10
4578
        mpd_qmul(&tmp, &static10, &tmp, &varcontext, status);
inline
        
mpd_qmul can be inlined into mpd_qln10 with cost=45 (threshold=375) 
mpd_qln10
inline
        
mpd_qmul inlined into mpd_qln10 
mpd_qln10
4579
        mpd_qsub(&tmp, &tmp, &one, &maxcontext, status);
inline
        
mpd_qsub too costly to inline (cost=670, threshold=625) 
mpd_qln10
inline
        
mpd_qsub will not be inlined into mpd_qln10 
mpd_qln10
4580
        mpd_qadd(result, result, &tmp, &maxcontext, status);
inline
        
mpd_qadd too costly to inline (cost=660, threshold=625) 
mpd_qln10
inline
        
mpd_qadd will not be inlined into mpd_qln10 
mpd_qln10
4581
        if (mpd_isspecial(result)) {
inline
            
mpd_isspecial should always be inlined (cost=always) 
mpd_qln10
inline
            
mpd_isspecial inlined into mpd_qln10 
mpd_qln10
4582
            break;
4583
        }
4584
    }
4585
4586
    mpd_del(&tmp);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qln10
inline
    
mpd_del inlined into mpd_qln10 
mpd_qln10
4587
    maxcontext.prec = prec;
4588
    mpd_qfinalize(result, &maxcontext, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qln10
inline
    
mpd_qfinalize will not be inlined into mpd_qln10 
mpd_qln10
4589
}
4590
4591
/*
4592
 * Initial approximations for the ln() iteration. The values have the
4593
 * following properties (established with both decimal.py and mpfr):
4594
 *
4595
 * Index 0 - 400, logarithms of x in [1.00, 5.00]:
4596
 *   abs(lnapprox[i] * 10**-3 - log((i+100)/100)) < 10**-2
4597
 *   abs(lnapprox[i] * 10**-3 - log((i+1+100)/100)) < 10**-2
4598
 *
4599
 * Index 401 - 899, logarithms of x in (0.500, 0.999]:
4600
 *   abs(-lnapprox[i] * 10**-3 - log((i+100)/1000)) < 10**-2
4601
 *   abs(-lnapprox[i] * 10**-3 - log((i+1+100)/1000)) < 10**-2
4602
 */
4603
static const uint16_t lnapprox[900] = {
4604
  /* index 0 - 400: log((i+100)/100) * 1000 */
4605
  0, 10, 20, 30, 39, 49, 58, 68, 77, 86, 95, 104, 113, 122, 131, 140, 148, 157,
4606
  166, 174, 182, 191, 199, 207, 215, 223, 231, 239, 247, 255, 262, 270, 278,
4607
  285, 293, 300, 308, 315, 322, 329, 336, 344, 351, 358, 365, 372, 378, 385,
4608
  392, 399, 406, 412, 419, 425, 432, 438, 445, 451, 457, 464, 470, 476, 482,
4609
  489, 495, 501, 507, 513, 519, 525, 531, 536, 542, 548, 554, 560, 565, 571,
4610
  577, 582, 588, 593, 599, 604, 610, 615, 621, 626, 631, 637, 642, 647, 652,
4611
  658, 663, 668, 673, 678, 683, 688, 693, 698, 703, 708, 713, 718, 723, 728,
4612
  732, 737, 742, 747, 751, 756, 761, 766, 770, 775, 779, 784, 788, 793, 798,
4613
  802, 806, 811, 815, 820, 824, 829, 833, 837, 842, 846, 850, 854, 859, 863,
4614
  867, 871, 876, 880, 884, 888, 892, 896, 900, 904, 908, 912, 916, 920, 924,
4615
  928, 932, 936, 940, 944, 948, 952, 956, 959, 963, 967, 971, 975, 978, 982,
4616
  986, 990, 993, 997, 1001, 1004, 1008, 1012, 1015, 1019, 1022, 1026, 1030,
4617
  1033, 1037, 1040, 1044, 1047, 1051, 1054, 1058, 1061, 1065, 1068, 1072, 1075,
4618
  1078, 1082, 1085, 1089, 1092, 1095, 1099, 1102, 1105, 1109, 1112, 1115, 1118,
4619
  1122, 1125, 1128, 1131, 1135, 1138, 1141, 1144, 1147, 1151, 1154, 1157, 1160,
4620
  1163, 1166, 1169, 1172, 1176, 1179, 1182, 1185, 1188, 1191, 1194, 1197, 1200,
4621
  1203, 1206, 1209, 1212, 1215, 1218, 1221, 1224, 1227, 1230, 1233, 1235, 1238,
4622
  1241, 1244, 1247, 1250, 1253, 1256, 1258, 1261, 1264, 1267, 1270, 1273, 1275,
4623
  1278, 1281, 1284, 1286, 1289, 1292, 1295, 1297, 1300, 1303, 1306, 1308, 1311,
4624
  1314, 1316, 1319, 1322, 1324, 1327, 1330, 1332, 1335, 1338, 1340, 1343, 1345,
4625
  1348, 1351, 1353, 1356, 1358, 1361, 1364, 1366, 1369, 1371, 1374, 1376, 1379,
4626
  1381, 1384, 1386, 1389, 1391, 1394, 1396, 1399, 1401, 1404, 1406, 1409, 1411,
4627
  1413, 1416, 1418, 1421, 1423, 1426, 1428, 1430, 1433, 1435, 1437, 1440, 1442,
4628
  1445, 1447, 1449, 1452, 1454, 1456, 1459, 1461, 1463, 1466, 1468, 1470, 1472,
4629
  1475, 1477, 1479, 1482, 1484, 1486, 1488, 1491, 1493, 1495, 1497, 1500, 1502,
4630
  1504, 1506, 1509, 1511, 1513, 1515, 1517, 1520, 1522, 1524, 1526, 1528, 1530,
4631
  1533, 1535, 1537, 1539, 1541, 1543, 1545, 1548, 1550, 1552, 1554, 1556, 1558,
4632
  1560, 1562, 1564, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1581, 1583, 1585,
4633
  1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609,
4634
  /* index 401 - 899: -log((i+100)/1000) * 1000 */
4635
  691, 689, 687, 685, 683, 681, 679, 677, 675, 673, 671, 669, 668, 666, 664,
4636
  662, 660, 658, 656, 654, 652, 650, 648, 646, 644, 642, 641, 639, 637, 635,
4637
  633, 631, 629, 627, 626, 624, 622, 620, 618, 616, 614, 612, 611, 609, 607,
4638
  605, 603, 602, 600, 598, 596, 594, 592, 591, 589, 587, 585, 583, 582, 580,
4639
  578, 576, 574, 573, 571, 569, 567, 566, 564, 562, 560, 559, 557, 555, 553,
4640
  552, 550, 548, 546, 545, 543, 541, 540, 538, 536, 534, 533, 531, 529, 528,
4641
  526, 524, 523, 521, 519, 518, 516, 514, 512, 511, 509, 508, 506, 504, 502,
4642
  501, 499, 498, 496, 494, 493, 491, 489, 488, 486, 484, 483, 481, 480, 478,
4643
  476, 475, 473, 472, 470, 468, 467, 465, 464, 462, 460, 459, 457, 456, 454,
4644
  453, 451, 449, 448, 446, 445, 443, 442, 440, 438, 437, 435, 434, 432, 431,
4645
  429, 428, 426, 425, 423, 422, 420, 419, 417, 416, 414, 412, 411, 410, 408,
4646
  406, 405, 404, 402, 400, 399, 398, 396, 394, 393, 392, 390, 389, 387, 386,
4647
  384, 383, 381, 380, 378, 377, 375, 374, 372, 371, 370, 368, 367, 365, 364,
4648
  362, 361, 360, 358, 357, 355, 354, 352, 351, 350, 348, 347, 345, 344, 342,
4649
  341, 340, 338, 337, 336, 334, 333, 331, 330, 328, 327, 326, 324, 323, 322,
4650
  320, 319, 318, 316, 315, 313, 312, 311, 309, 308, 306, 305, 304, 302, 301,
4651
  300, 298, 297, 296, 294, 293, 292, 290, 289, 288, 286, 285, 284, 282, 281,
4652
  280, 278, 277, 276, 274, 273, 272, 270, 269, 268, 267, 265, 264, 263, 261,
4653
  260, 259, 258, 256, 255, 254, 252, 251, 250, 248, 247, 246, 245, 243, 242,
4654
  241, 240, 238, 237, 236, 234, 233, 232, 231, 229, 228, 227, 226, 224, 223,
4655
  222, 221, 219, 218, 217, 216, 214, 213, 212, 211, 210, 208, 207, 206, 205,
4656
  203, 202, 201, 200, 198, 197, 196, 195, 194, 192, 191, 190, 189, 188, 186,
4657
  185, 184, 183, 182, 180, 179, 178, 177, 176, 174, 173, 172, 171, 170, 168,
4658
  167, 166, 165, 164, 162, 161, 160, 159, 158, 157, 156, 154, 153, 152, 151,
4659
  150, 148, 147, 146, 145, 144, 143, 142, 140, 139, 138, 137, 136, 135, 134,
4660
  132, 131, 130, 129, 128, 127, 126, 124, 123, 122, 121, 120, 119, 118, 116,
4661
  115, 114, 113, 112, 111, 110, 109, 108, 106, 105, 104, 103, 102, 101, 100,
4662
  99, 98, 97, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 84, 83, 82, 81, 80, 79,
4663
  78, 77, 76, 75, 74, 73, 72, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59,
4664
  58, 57, 56, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39,
4665
  38, 37, 36, 35, 34, 33, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19,
4666
  18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
4667
};
4668
4669
/*
 * Internal ln() function that does not check for specials, zero or one.
 * Relative error: abs(result - log(a)) < 0.1 * 10**-prec * abs(log(a))
 */
static void
_mpd_qln(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
         uint32_t *status)
{
    mpd_context_t varcontext, maxcontext;
    mpd_t *z = (mpd_t *) result;         /* alias: the iterate is built in place */
    MPD_NEW_STATIC(v,0,0,0,0);           /* reduced operand, 0.5 < v <= 5 */
    MPD_NEW_STATIC(vtmp,0,0,0,0);        /* truncated copy of v for low-prec steps */
    MPD_NEW_STATIC(tmp,0,0,0,0);
    mpd_ssize_t klist[MPD_MAX_PREC_LOG2]; /* precision schedule for the iteration */
    mpd_ssize_t maxprec, shift, t;
    mpd_ssize_t a_digits, a_exp;
    mpd_uint_t dummy, x;
    int i;

    assert(!mpd_isspecial(a) && !mpd_iszerocoeff(a));

    /*
     * We are calculating ln(a) = ln(v * 10^t) = ln(v) + t*ln(10),
     * where 0.5 < v <= 5.
     */
    if (!mpd_qcopy(&v, a, status)) {
        mpd_seterror(result, MPD_Malloc_error, status);
        goto finish;
    }

    /* Initial approximation: we have at least one non-zero digit.
     * x becomes the three most significant digits minus 100, i.e.
     * an index in [0, 899] into the lnapprox table. */
    _mpd_get_msdigits(&dummy, &x, &v, 3);
    if (x < 10) x *= 10;
    if (x < 100) x *= 10;
    x -= 100;

    /* a may equal z: capture the fields before z is reinitialized */
    a_digits = a->digits;
    a_exp = a->exp;

    /* Seed z with the table approximation of ln(v), scaled by 10**-3. */
    mpd_minalloc(z);
    mpd_clear_flags(z);
    z->data[0] = lnapprox[x];
    z->len = 1;
    z->exp = -3;
    mpd_setdigits(z);

    if (x <= 400) {
        /* Reduce the input operand to 1.00 <= v <= 5.00. Let y = x + 100,
         * so 100 <= y <= 500. Since y contains the most significant digits
         * of v, y/100 <= v < (y+1)/100 and abs(z - log(v)) < 10**-2. */
        v.exp = -(a_digits - 1);
        t = a_exp + a_digits - 1;
    }
    else {
        /* Reduce the input operand to 0.500 < v <= 0.999. Let y = x + 100,
         * so 500 < y <= 999. Since y contains the most significant digits
         * of v, y/1000 <= v < (y+1)/1000 and abs(z - log(v)) < 10**-2. */
        v.exp = -a_digits;
        t = a_exp + a_digits;
        mpd_set_negative(z);  /* table stores abs(ln(v)); ln(v) < 0 here */
    }

    mpd_maxcontext(&maxcontext);
    mpd_maxcontext(&varcontext);
    varcontext.round = MPD_ROUND_TRUNC;

    maxprec = ctx->prec + 2;
    if (t == 0 && (x <= 15 || x >= 800)) {
        /* 0.900 <= v <= 1.15: Estimate the magnitude of the logarithm.
         * If ln(v) will underflow, skip the loop. Otherwise, adjust the
         * precision upwards in order to obtain a sufficient number of
         * significant digits.
         *
         *   Case v > 1:
         *      abs((v-1)/10) < abs((v-1)/v) < abs(ln(v)) < abs(v-1)
         *   Case v < 1:
         *      abs(v-1) < abs(ln(v)) < abs((v-1)/v) < abs((v-1)*10)
         */
        int cmp = _mpd_cmp(&v, &one);

        /* Upper bound (assume v > 1): abs(v-1), unrounded */
        _mpd_qsub(&tmp, &v, &one, &maxcontext, &maxcontext.status);
        if (maxcontext.status & MPD_Errors) {
            mpd_seterror(result, MPD_Malloc_error, status);
            goto finish;
        }

        if (cmp < 0) {
            /* v < 1: abs((v-1)*10) */
            tmp.exp += 1;
        }
        if (mpd_adjexp(&tmp) < mpd_etiny(ctx)) {
            /* The upper bound is less than etiny: Underflow to zero */
            _settriple(result, (cmp<0), 1, mpd_etiny(ctx)-1);
            goto finish;
        }
        /* Lower bound: abs((v-1)/10) or abs(v-1) */
        tmp.exp -= 1;
        if (mpd_adjexp(&tmp) < 0) {
            /* Absolute error of the loop: abs(z - log(v)) < 10**-p. If
             * p = ctx->prec+2-adjexp(lower), then the relative error of
             * the result is (using 10**adjexp(x) <= abs(x)):
             *
             *   abs(z - log(v)) / abs(log(v)) < 10**-p / abs(log(v))
             *                                 <= 10**(-ctx->prec-2)
             */
            maxprec = maxprec - mpd_adjexp(&tmp);
        }
    }

    /* Refine z with increasing working precision: each pass computes
     * z := z + (v * exp(-z) - 1), doubling the number of correct digits. */
    i = ln_schedule_prec(klist, maxprec, 2);
    for (; i >= 0; i--) {
        varcontext.prec = 2*klist[i]+3;
        z->flags ^= MPD_NEG;                     /* negate z ... */
        _mpd_qexp(&tmp, z, &varcontext, status); /* ... so tmp = exp(-z) */
        z->flags ^= MPD_NEG;                     /* restore the sign of z */

        if (v.digits > varcontext.prec) {
            /* Truncate v to the working precision; adding 'shift' to the
             * exponent keeps the value's magnitude. */
            shift = v.digits - varcontext.prec;
            mpd_qshiftr(&vtmp, &v, shift, status);
            vtmp.exp += shift;
            mpd_qmul(&tmp, &vtmp, &tmp, &varcontext, status);
        }
        else {
            mpd_qmul(&tmp, &v, &tmp, &varcontext, status);
        }

        mpd_qsub(&tmp, &tmp, &one, &maxcontext, status);
        mpd_qadd(z, z, &tmp, &maxcontext, status);
        if (mpd_isspecial(z)) {
            break;
        }
    }

    /*
     * Case t == 0:
     *    t * log(10) == 0, the result does not change and the analysis
     *    above applies. If v < 0.900 or v > 1.15, the relative error is
     *    less than 10**(-ctx.prec-1).
     * Case t != 0:
     *      z := approx(log(v))
     *      y := approx(log(10))
     *      p := maxprec = ctx->prec + 2
     *   Absolute errors:
     *      1) abs(z - log(v)) < 10**-p
     *      2) abs(y - log(10)) < 10**-p
     *   The multiplication is exact, so:
     *      3) abs(t*y - t*log(10)) < t*10**-p
     *   The sum is exact, so:
     *      4) abs((z + t*y) - (log(v) + t*log(10))) < (abs(t) + 1) * 10**-p
     *   Bounds for log(v) and log(10):
     *      5) -7/10 < log(v) < 17/10
     *      6) 23/10 < log(10) < 24/10
     *   Using 4), 5), 6) and t != 0, the relative error is:
     *
     *      7) relerr < ((abs(t) + 1)*10**-p) / abs(log(v) + t*log(10))
     *                < 0.5 * 10**(-p + 1) = 0.5 * 10**(-ctx->prec-1)
     */
    mpd_qln10(&v, maxprec+1, status);
    mpd_qmul_ssize(&tmp, &v, t, &maxcontext, status);
    mpd_qadd(result, &tmp, z, &maxcontext, status);


finish:
    /* ln() is transcendental for all valid inputs of this helper, so the
     * result is always inexact and rounded. */
    *status |= (MPD_Inexact|MPD_Rounded);
    mpd_del(&v);
    mpd_del(&vtmp);
    mpd_del(&tmp);
}
4839
4840
/* ln(a) */
void
mpd_qln(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
        uint32_t *status)
{
    mpd_context_t workctx;
    mpd_ssize_t adjexp, t;

    /* Special values: NaN propagates; ln(-x) is invalid; ln(+inf) = +inf */
    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isnegative(a)) {
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        mpd_setspecial(result, MPD_POS, MPD_INF);
        return;
    }
    /* ln(0) = -inf */
    if (mpd_iszerocoeff(a)) {
        mpd_setspecial(result, MPD_NEG, MPD_INF);
        return;
    }
    if (mpd_isnegative(a)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    /* ln(1) = 0, exactly */
    if (_mpd_cmp(a, &one) == 0) {
        _settriple(result, MPD_POS, 0, 0);
        return;
    }
    /*
     * Check if the result will overflow (0 < x, x != 1):
     *   1) log10(x) < 0 iff adjexp(x) < 0
     *   2) 0 < x /\ x <= y ==> adjexp(x) <= adjexp(y)
     *   3) 0 < x /\ x != 1 ==> 2 * abs(log10(x)) < abs(log(x))
     *   4) adjexp(x) <= log10(x) < adjexp(x) + 1
     *
     * Case adjexp(x) >= 0:
     *   5) 2 * adjexp(x) < abs(log(x))
     *   Case adjexp(x) > 0:
     *     6) adjexp(2 * adjexp(x)) <= adjexp(abs(log(x)))
     *   Case adjexp(x) == 0:
     *     mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
     *
     * Case adjexp(x) < 0:
     *   7) 2 * (-adjexp(x) - 1) < abs(log(x))
     *   Case adjexp(x) < -1:
     *     8) adjexp(2 * (-adjexp(x) - 1)) <= adjexp(abs(log(x)))
     *   Case adjexp(x) == -1:
     *     mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
     */
    adjexp = mpd_adjexp(a);
    t = (adjexp < 0) ? -adjexp-1 : adjexp;
    t *= 2;
    if (mpd_exp_digits(t)-1 > ctx->emax) {
        /* Result magnitude exceeds emax: overflow to a signed infinity */
        *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
        mpd_setspecial(result, (adjexp<0), MPD_INF);
        return;
    }

    workctx = *ctx;
    workctx.round = MPD_ROUND_HALF_EVEN;

    if (ctx->allcr) {
        /* Correct rounding requested: iterate with growing precision until
         * rounding result+ulp and result-ulp to ctx->prec agree. */
        MPD_NEW_STATIC(t1, 0,0,0,0);
        MPD_NEW_STATIC(t2, 0,0,0,0);
        MPD_NEW_STATIC(ulp, 0,0,0,0);
        MPD_NEW_STATIC(aa, 0,0,0,0);
        mpd_ssize_t prec;

        /* result may alias a: work on a private copy of the operand */
        if (result == a) {
            if (!mpd_qcopy(&aa, a, status)) {
                mpd_seterror(result, MPD_Malloc_error, status);
                return;
            }
            a = &aa;
        }

        workctx.clamp = 0;
        prec = ctx->prec + 3;
        while (1) {
            workctx.prec = prec;
            _mpd_qln(result, a, &workctx, status);
            /* ulp = one unit in the last place of the current result */
            _ssettriple(&ulp, MPD_POS, 1,
                        result->exp + result->digits-workctx.prec);

            /* Round result+-ulp in the target precision; if both round to
             * the same value, the result is correctly rounded. */
            workctx.prec = ctx->prec;
            mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
            mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
            if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
                mpd_qcmp(&t1, &t2, status) == 0) {
                workctx.clamp = ctx->clamp;
                mpd_check_underflow(result, &workctx, status);
                mpd_qfinalize(result, &workctx, status);
                break;
            }
            prec += MPD_RDIGITS;
        }
        mpd_del(&t1);
        mpd_del(&t2);
        mpd_del(&ulp);
        mpd_del(&aa);
    }
    else {
        /* Single pass: relative error bounded by _mpd_qln()'s contract */
        _mpd_qln(result, a, &workctx, status);
        mpd_check_underflow(result, &workctx, status);
        mpd_qfinalize(result, &workctx, status);
    }
}
4950
4951
/*
4952
 * Internal log10() function that does not check for specials, zero or one.
4953
 * Case SKIP_FINALIZE:
4954
 *   Relative error: abs(result - log10(a)) < 0.1 * 10**-prec * abs(log10(a))
4955
 * Case DO_FINALIZE:
4956
 *   Ulp error: abs(result - log10(a)) < ulp(log10(a))
4957
 */
4958
enum {SKIP_FINALIZE, DO_FINALIZE};
4959
static void
4960
_mpd_qlog10(int action, mpd_t *result, const mpd_t *a,
4961
            const mpd_context_t *ctx, uint32_t *status)
4962
{
4963
    mpd_context_t workctx;
4964
    MPD_NEW_STATIC(ln10,0,0,0,0);
4965
4966
    mpd_maxcontext(&workctx);
inline
    
mpd_maxcontext will not be inlined into _mpd_qlog10 because its definition is unavailable 
_mpd_qlog10
4967
    workctx.prec = ctx->prec + 3;
gvn
                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qlog10
4968
    /* relative error: 0.1 * 10**(-p-3). The specific underflow shortcut
4969
     * in _mpd_qln() does not change the final result. */
4970
    _mpd_qln(result, a, &workctx, status);
inline
    
_mpd_qln too costly to inline (cost=630, threshold=625) 
_mpd_qlog10
inline
    
_mpd_qln will not be inlined into _mpd_qlog10 
_mpd_qlog10
4971
    /* relative error: 5 * 10**(-p-3) */
4972
    mpd_qln10(&ln10, workctx.prec, status);
inline
    
mpd_qln10 too costly to inline (cost=630, threshold=625) 
_mpd_qlog10
inline
    
mpd_qln10 will not be inlined into _mpd_qlog10 
_mpd_qlog10
gvn
                             
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qlog10
4973
4974
    if (action == DO_FINALIZE) {
4975
        workctx = *ctx;
4976
        workctx.round = MPD_ROUND_HALF_EVEN;
4977
    }
4978
    /* SKIP_FINALIZE: relative error: 5 * 10**(-p-3) */
4979
    _mpd_qdiv(NO_IDEAL_EXP, result, result, &ln10, &workctx, status);
inline
    
_mpd_qdiv too costly to inline (cost=675, threshold=625) 
_mpd_qlog10
inline
    
_mpd_qdiv will not be inlined into _mpd_qlog10 
_mpd_qlog10
4980
4981
    mpd_del(&ln10);
inline
    
mpd_del should always be inlined (cost=always) 
_mpd_qlog10
inline
    
mpd_del inlined into _mpd_qlog10 
_mpd_qlog10
4982
}
4983
4984
/* log10(a) */
4985
void
4986
mpd_qlog10(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
4987
           uint32_t *status)
4988
{
4989
    mpd_context_t workctx;
4990
    mpd_ssize_t adjexp, t;
4991
4992
    workctx = *ctx;
4993
    workctx.round = MPD_ROUND_HALF_EVEN;
4994
4995
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qlog10
inline
        
mpd_isspecial inlined into mpd_qlog10 
mpd_qlog10
4996
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qlog10
inline
            
mpd_qcheck_nan will not be inlined into mpd_qlog10 
mpd_qlog10
4997
            return;
4998
        }
4999
        if (mpd_isnegative(a)) {
inline
            
mpd_isnegative should always be inlined (cost=always) 
mpd_qlog10
inline
            
mpd_isnegative inlined into mpd_qlog10 
mpd_qlog10
5000
            mpd_seterror(result, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into mpd_qlog10 with cost=130 (threshold=250) 
mpd_qlog10
inline
            
mpd_seterror inlined into mpd_qlog10 
mpd_qlog10
5001
            return;
5002
        }
5003
        mpd_setspecial(result, MPD_POS, MPD_INF);
inline
        
mpd_setspecial can be inlined into mpd_qlog10 with cost=115 (threshold=250) 
mpd_qlog10
inline
        
mpd_setspecial inlined into mpd_qlog10 
mpd_qlog10
5004
        return;
5005
    }
5006
    if (mpd_iszerocoeff(a)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qlog10
inline
        
mpd_iszerocoeff inlined into mpd_qlog10 
mpd_qlog10
5007
        mpd_setspecial(result, MPD_NEG, MPD_INF);
inline
        
mpd_setspecial can be inlined into mpd_qlog10 with cost=115 (threshold=250) 
mpd_qlog10
inline
        
mpd_setspecial inlined into mpd_qlog10 
mpd_qlog10
5008
        return;
5009
    }
5010
    if (mpd_isnegative(a)) {
inline
        
mpd_isnegative should always be inlined (cost=always) 
mpd_qlog10
inline
        
mpd_isnegative inlined into mpd_qlog10 
mpd_qlog10
5011
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qlog10 with cost=130 (threshold=250) 
mpd_qlog10
inline
        
mpd_seterror inlined into mpd_qlog10 
mpd_qlog10
5012
        return;
5013
    }
5014
    if (mpd_coeff_ispow10(a)) {
inline
        
mpd_coeff_ispow10 can be inlined into mpd_qlog10 with cost=-14680 (threshold=250) 
mpd_qlog10
inline
        
mpd_coeff_ispow10 inlined into mpd_qlog10 
mpd_qlog10
5015
        uint8_t sign = 0;
5016
        adjexp = mpd_adjexp(a);
5017
        if (adjexp < 0) {
5018
            sign = 1;
5019
            adjexp = -adjexp;
5020
        }
5021
        _settriple(result, sign, adjexp, 0);
inline
        
_settriple can be inlined into mpd_qlog10 with cost=185 (threshold=250) 
mpd_qlog10
inline
        
_settriple inlined into mpd_qlog10 
mpd_qlog10
5022
        mpd_qfinalize(result, &workctx, status);
inline
        
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qlog10
inline
        
mpd_qfinalize will not be inlined into mpd_qlog10 
mpd_qlog10
5023
        return;
5024
    }
5025
    /*
5026
     * Check if the result will overflow (0 < x, x != 1):
5027
     *   1) log10(x) < 0 iff adjexp(x) < 0
5028
     *   2) 0 < x /\ x <= y ==> adjexp(x) <= adjexp(y)
5029
     *   3) adjexp(x) <= log10(x) < adjexp(x) + 1
5030
     *
5031
     * Case adjexp(x) >= 0:
5032
     *   4) adjexp(x) <= abs(log10(x))
5033
     *   Case adjexp(x) > 0:
5034
     *     5) adjexp(adjexp(x)) <= adjexp(abs(log10(x)))
5035
     *   Case adjexp(x) == 0:
5036
     *     mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
5037
     *
5038
     * Case adjexp(x) < 0:
5039
     *   6) -adjexp(x) - 1 < abs(log10(x))
5040
     *   Case adjexp(x) < -1:
5041
     *     7) adjexp(-adjexp(x) - 1) <= adjexp(abs(log(x)))
5042
     *   Case adjexp(x) == -1:
5043
     *     mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
5044
     */
5045
    adjexp = mpd_adjexp(a);
inline
             
mpd_adjexp should always be inlined (cost=always) 
mpd_qlog10
inline
             
mpd_adjexp inlined into mpd_qlog10 
mpd_qlog10
5046
    t = (adjexp < 0) ? -adjexp-1 : adjexp;
5047
    if (mpd_exp_digits(t)-1 > ctx->emax) {
inline
        
mpd_exp_digits can be inlined into mpd_qlog10 with cost=265 (threshold=325) 
mpd_qlog10
inline
        
mpd_exp_digits inlined into mpd_qlog10 
mpd_qlog10
5048
        *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
5049
        mpd_setspecial(result, (adjexp<0), MPD_INF);
inline
        
mpd_setspecial can be inlined into mpd_qlog10 with cost=120 (threshold=250) 
mpd_qlog10
inline
        
mpd_setspecial inlined into mpd_qlog10 
mpd_qlog10
5050
        return;
5051
    }
5052
5053
    if (ctx->allcr) {
5054
        MPD_NEW_STATIC(t1, 0,0,0,0);
5055
        MPD_NEW_STATIC(t2, 0,0,0,0);
5056
        MPD_NEW_STATIC(ulp, 0,0,0,0);
5057
        MPD_NEW_STATIC(aa, 0,0,0,0);
5058
        mpd_ssize_t prec;
5059
5060
        if (result == a) {
5061
            if (!mpd_qcopy(&aa, a, status)) {
inline
                 
mpd_qcopy can be inlined into mpd_qlog10 with cost=215 (threshold=250) 
mpd_qlog10
inline
                 
mpd_qcopy inlined into mpd_qlog10 
mpd_qlog10
5062
                mpd_seterror(result, MPD_Malloc_error, status);
inline
                
mpd_seterror can be inlined into mpd_qlog10 with cost=130 (threshold=250) 
mpd_qlog10
inline
                
mpd_seterror inlined into mpd_qlog10 
mpd_qlog10
5063
                return;
5064
            }
5065
            a = &aa;
5066
        }
5067
5068
        workctx.clamp = 0;
5069
        prec = ctx->prec + 3;
gvn
                    
load of type i64 not eliminated because it is clobbered by call 
mpd_qlog10
5070
        while (1) {
loop-vectorize
        
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qlog10
loop-vectorize
        
loop not vectorized 
mpd_qlog10
5071
            workctx.prec = prec;
licm
                    
hosting getelementptr 
mpd_qlog10
5072
            _mpd_qlog10(SKIP_FINALIZE, result, a, &workctx, status);
inline
            
_mpd_qlog10 too costly to inline (cost=255, threshold=250) 
mpd_qlog10
inline
            
_mpd_qlog10 will not be inlined into mpd_qlog10 
mpd_qlog10
5073
            _ssettriple(&ulp, MPD_POS, 1,
inline
            
_ssettriple too costly to inline (cost=300, threshold=250) 
mpd_qlog10
inline
            
_ssettriple will not be inlined into mpd_qlog10 
mpd_qlog10
5074
                        result->exp + result->digits-workctx.prec);
licm
                                
hosting getelementptr 
mpd_qlog10
licm
                                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qlog10
licm
                                              
hosting getelementptr 
mpd_qlog10
licm
                                              
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qlog10
licm
                                                             
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qlog10
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                                              
load of type i64 not eliminated because it is clobbered by call 
mpd_qlog10
gvn
                                                             
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qlog10
5075
5076
            workctx.prec = ctx->prec;
licm
                                
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qlog10
gvn
                                
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qlog10
5077
            mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
inline
            
mpd_qadd too costly to inline (cost=660, threshold=625) 
mpd_qlog10
inline
            
mpd_qadd will not be inlined into mpd_qlog10 
mpd_qlog10
licm
                                                           
hosting getelementptr 
mpd_qlog10
5078
            mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
inline
            
mpd_qsub too costly to inline (cost=670, threshold=625) 
mpd_qlog10
inline
            
mpd_qsub will not be inlined into mpd_qlog10 
mpd_qlog10
5079
            if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
inline
                
mpd_isspecial should always be inlined (cost=always) 
mpd_qlog10
inline
                
mpd_isspecial inlined into mpd_qlog10 
mpd_qlog10
inline
                                         
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qlog10
inline
                                         
mpd_iszerocoeff inlined into mpd_qlog10 
mpd_qlog10
5080
                mpd_qcmp(&t1, &t2, status) == 0) {
inline
                
mpd_qcmp can be inlined into mpd_qlog10 with cost=85 (threshold=250) 
mpd_qlog10
inline
                
mpd_qcmp inlined into mpd_qlog10 
mpd_qlog10
5081
                workctx.clamp = ctx->clamp;
gvn
                                     
load of type i32 not eliminated because it is clobbered by call 
mpd_qlog10
5082
                mpd_check_underflow(result, &workctx, status);
inline
                
mpd_check_underflow can be inlined into mpd_qlog10 with cost=-14930 (threshold=325) 
mpd_qlog10
inline
                
mpd_check_underflow inlined into mpd_qlog10 
mpd_qlog10
5083
                mpd_qfinalize(result, &workctx, status);
inline
                
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qlog10
inline
                
mpd_qfinalize will not be inlined into mpd_qlog10 
mpd_qlog10
5084
                break;
5085
            }
5086
            prec += MPD_RDIGITS;
5087
        }
5088
        mpd_del(&t1);
inline
        
mpd_del should always be inlined (cost=always) 
mpd_qlog10
inline
        
mpd_del inlined into mpd_qlog10 
mpd_qlog10
5089
        mpd_del(&t2);
inline
        
mpd_del should always be inlined (cost=always) 
mpd_qlog10
inline
        
mpd_del inlined into mpd_qlog10 
mpd_qlog10
5090
        mpd_del(&ulp);
inline
        
mpd_del should always be inlined (cost=always) 
mpd_qlog10
inline
        
mpd_del inlined into mpd_qlog10 
mpd_qlog10
5091
        mpd_del(&aa);
inline
        
mpd_del should always be inlined (cost=always) 
mpd_qlog10
inline
        
mpd_del inlined into mpd_qlog10 
mpd_qlog10
5092
    }
5093
    else {
5094
        _mpd_qlog10(DO_FINALIZE, result, a, &workctx, status);
inline
        
_mpd_qlog10 too costly to inline (cost=265, threshold=250) 
mpd_qlog10
inline
        
_mpd_qlog10 will not be inlined into mpd_qlog10 
mpd_qlog10
5095
        mpd_check_underflow(result, &workctx, status);
inline
        
mpd_check_underflow can be inlined into mpd_qlog10 with cost=70 (threshold=325) 
mpd_qlog10
inline
        
mpd_check_underflow inlined into mpd_qlog10 
mpd_qlog10
5096
    }
5097
}
5098
5099
/*
5100
 * Maximum of the two operands. Attention: If one operand is a quiet NaN and the
5101
 * other is numeric, the numeric operand is returned. This may not be what one
5102
 * expects.
5103
 */
5104
void
5105
mpd_qmax(mpd_t *result, const mpd_t *a, const mpd_t *b,
5106
         const mpd_context_t *ctx, uint32_t *status)
5107
{
5108
    int c;
5109
5110
    if (mpd_isqnan(a) && !mpd_isnan(b)) {
inline
        
mpd_isqnan should always be inlined (cost=always) 
mpd_qmax
inline
        
mpd_isqnan inlined into mpd_qmax 
mpd_qmax
inline
                          
mpd_isnan should always be inlined (cost=always) 
mpd_qmax
inline
                          
mpd_isnan inlined into mpd_qmax 
mpd_qmax
5111
        mpd_qcopy(result, b, status);
inline
        
mpd_qcopy can be inlined into mpd_qmax with cost=215 (threshold=250) 
mpd_qmax
inline
        
mpd_qcopy inlined into mpd_qmax 
mpd_qmax
5112
    }
5113
    else if (mpd_isqnan(b) && !mpd_isnan(a)) {
inline
             
mpd_isqnan should always be inlined (cost=always) 
mpd_qmax
inline
             
mpd_isqnan inlined into mpd_qmax 
mpd_qmax
inline
                               
mpd_isnan should always be inlined (cost=always) 
mpd_qmax
inline
                               
mpd_isnan inlined into mpd_qmax 
mpd_qmax
5114
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into mpd_qmax with cost=215 (threshold=250) 
mpd_qmax
inline
        
mpd_qcopy inlined into mpd_qmax 
mpd_qmax
5115
    }
5116
    else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
             
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qmax
inline
             
mpd_qcheck_nans will not be inlined into mpd_qmax 
mpd_qmax
5117
        return;
5118
    }
5119
    else {
5120
        c = _mpd_cmp(a, b);
inline
            
_mpd_cmp too costly to inline (cost=550, threshold=250) 
mpd_qmax
inline
            
_mpd_cmp will not be inlined into mpd_qmax 
mpd_qmax
5121
        if (c == 0) {
5122
            c = _mpd_cmp_numequal(a, b);
inline
                
_mpd_cmp_numequal can be inlined into mpd_qmax with cost=50 (threshold=325) 
mpd_qmax
inline
                
_mpd_cmp_numequal inlined into mpd_qmax 
mpd_qmax
5123
        }
5124
5125
        if (c < 0) {
5126
            mpd_qcopy(result, b, status);
inline
            
mpd_qcopy can be inlined into mpd_qmax with cost=215 (threshold=250) 
mpd_qmax
inline
            
mpd_qcopy inlined into mpd_qmax 
mpd_qmax
5127
        }
5128
        else {
5129
            mpd_qcopy(result, a, status);
inline
            
mpd_qcopy can be inlined into mpd_qmax with cost=215 (threshold=250) 
mpd_qmax
inline
            
mpd_qcopy inlined into mpd_qmax 
mpd_qmax
5130
        }
5131
    }
5132
5133
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qmax
inline
    
mpd_qfinalize will not be inlined into mpd_qmax 
mpd_qmax
5134
}
5135
5136
/*
5137
 * Maximum magnitude: Same as mpd_max(), but compares the operands with their
5138
 * sign ignored.
5139
 */
5140
void
5141
mpd_qmax_mag(mpd_t *result, const mpd_t *a, const mpd_t *b,
5142
             const mpd_context_t *ctx, uint32_t *status)
5143
{
5144
    int c;
5145
5146
    if (mpd_isqnan(a) && !mpd_isnan(b)) {
inline
        
mpd_isqnan should always be inlined (cost=always) 
mpd_qmax_mag
inline
        
mpd_isqnan inlined into mpd_qmax_mag 
mpd_qmax_mag
inline
                          
mpd_isnan should always be inlined (cost=always) 
mpd_qmax_mag
inline
                          
mpd_isnan inlined into mpd_qmax_mag 
mpd_qmax_mag
5147
        mpd_qcopy(result, b, status);
inline
        
mpd_qcopy can be inlined into mpd_qmax_mag with cost=215 (threshold=250) 
mpd_qmax_mag
inline
        
mpd_qcopy inlined into mpd_qmax_mag 
mpd_qmax_mag
5148
    }
5149
    else if (mpd_isqnan(b) && !mpd_isnan(a)) {
inline
             
mpd_isqnan should always be inlined (cost=always) 
mpd_qmax_mag
inline
             
mpd_isqnan inlined into mpd_qmax_mag 
mpd_qmax_mag
inline
                               
mpd_isnan should always be inlined (cost=always) 
mpd_qmax_mag
inline
                               
mpd_isnan inlined into mpd_qmax_mag 
mpd_qmax_mag
5150
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into mpd_qmax_mag with cost=215 (threshold=250) 
mpd_qmax_mag
inline
        
mpd_qcopy inlined into mpd_qmax_mag 
mpd_qmax_mag
5151
    }
5152
    else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
             
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qmax_mag
inline
             
mpd_qcheck_nans will not be inlined into mpd_qmax_mag 
mpd_qmax_mag
5153
        return;
5154
    }
5155
    else {
5156
        c = _mpd_cmp_abs(a, b);
inline
            
_mpd_cmp_abs too costly to inline (cost=360, threshold=250) 
mpd_qmax_mag
inline
            
_mpd_cmp_abs will not be inlined into mpd_qmax_mag 
mpd_qmax_mag
5157
        if (c == 0) {
5158
            c = _mpd_cmp_numequal(a, b);
inline
                
_mpd_cmp_numequal can be inlined into mpd_qmax_mag with cost=50 (threshold=325) 
mpd_qmax_mag
inline
                
_mpd_cmp_numequal inlined into mpd_qmax_mag 
mpd_qmax_mag
5159
        }
5160
5161
        if (c < 0) {
5162
            mpd_qcopy(result, b, status);
inline
            
mpd_qcopy can be inlined into mpd_qmax_mag with cost=215 (threshold=250) 
mpd_qmax_mag
inline
            
mpd_qcopy inlined into mpd_qmax_mag 
mpd_qmax_mag
5163
        }
5164
        else {
5165
            mpd_qcopy(result, a, status);
inline
            
mpd_qcopy can be inlined into mpd_qmax_mag with cost=215 (threshold=250) 
mpd_qmax_mag
inline
            
mpd_qcopy inlined into mpd_qmax_mag 
mpd_qmax_mag
5166
        }
5167
    }
5168
5169
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qmax_mag
inline
    
mpd_qfinalize will not be inlined into mpd_qmax_mag 
mpd_qmax_mag
5170
}
5171
5172
/*
5173
 * Minimum of the two operands. Attention: If one operand is a quiet NaN and the
5174
 * other is numeric, the numeric operand is returned. This may not be what one
5175
 * expects.
5176
 */
5177
void
5178
mpd_qmin(mpd_t *result, const mpd_t *a, const mpd_t *b,
5179
         const mpd_context_t *ctx, uint32_t *status)
5180
{
5181
    int c;
5182
5183
    if (mpd_isqnan(a) && !mpd_isnan(b)) {
inline
        
mpd_isqnan should always be inlined (cost=always) 
mpd_qmin
inline
        
mpd_isqnan inlined into mpd_qmin 
mpd_qmin
inline
                          
mpd_isnan should always be inlined (cost=always) 
mpd_qmin
inline
                          
mpd_isnan inlined into mpd_qmin 
mpd_qmin
5184
        mpd_qcopy(result, b, status);
inline
        
mpd_qcopy can be inlined into mpd_qmin with cost=215 (threshold=250) 
mpd_qmin
inline
        
mpd_qcopy inlined into mpd_qmin 
mpd_qmin
5185
    }
5186
    else if (mpd_isqnan(b) && !mpd_isnan(a)) {
inline
             
mpd_isqnan should always be inlined (cost=always) 
mpd_qmin
inline
             
mpd_isqnan inlined into mpd_qmin 
mpd_qmin
inline
                               
mpd_isnan should always be inlined (cost=always) 
mpd_qmin
inline
                               
mpd_isnan inlined into mpd_qmin 
mpd_qmin
5187
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into mpd_qmin with cost=215 (threshold=250) 
mpd_qmin
inline
        
mpd_qcopy inlined into mpd_qmin 
mpd_qmin
5188
    }
5189
    else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
             
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qmin
inline
             
mpd_qcheck_nans will not be inlined into mpd_qmin 
mpd_qmin
5190
        return;
5191
    }
5192
    else {
5193
        c = _mpd_cmp(a, b);
inline
            
_mpd_cmp too costly to inline (cost=550, threshold=250) 
mpd_qmin
inline
            
_mpd_cmp will not be inlined into mpd_qmin 
mpd_qmin
5194
        if (c == 0) {
5195
            c = _mpd_cmp_numequal(a, b);
inline
                
_mpd_cmp_numequal can be inlined into mpd_qmin with cost=50 (threshold=325) 
mpd_qmin
inline
                
_mpd_cmp_numequal inlined into mpd_qmin 
mpd_qmin
5196
        }
5197
5198
        if (c < 0) {
5199
            mpd_qcopy(result, a, status);
inline
            
mpd_qcopy can be inlined into mpd_qmin with cost=215 (threshold=250) 
mpd_qmin
inline
            
mpd_qcopy inlined into mpd_qmin 
mpd_qmin
5200
        }
5201
        else {
5202
            mpd_qcopy(result, b, status);
inline
            
mpd_qcopy can be inlined into mpd_qmin with cost=215 (threshold=250) 
mpd_qmin
inline
            
mpd_qcopy inlined into mpd_qmin 
mpd_qmin
5203
        }
5204
    }
5205
5206
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qmin
inline
    
mpd_qfinalize will not be inlined into mpd_qmin 
mpd_qmin
5207
}
5208
5209
/*
5210
 * Minimum magnitude: Same as mpd_min(), but compares the operands with their
5211
 * sign ignored.
5212
 */
5213
void
5214
mpd_qmin_mag(mpd_t *result, const mpd_t *a, const mpd_t *b,
5215
             const mpd_context_t *ctx, uint32_t *status)
5216
{
5217
    int c;
5218
5219
    if (mpd_isqnan(a) && !mpd_isnan(b)) {
inline
        
mpd_isqnan should always be inlined (cost=always) 
mpd_qmin_mag
inline
        
mpd_isqnan inlined into mpd_qmin_mag 
mpd_qmin_mag
inline
                          
mpd_isnan should always be inlined (cost=always) 
mpd_qmin_mag
inline
                          
mpd_isnan inlined into mpd_qmin_mag 
mpd_qmin_mag
5220
        mpd_qcopy(result, b, status);
inline
        
mpd_qcopy can be inlined into mpd_qmin_mag with cost=215 (threshold=250) 
mpd_qmin_mag
inline
        
mpd_qcopy inlined into mpd_qmin_mag 
mpd_qmin_mag
5221
    }
5222
    else if (mpd_isqnan(b) && !mpd_isnan(a)) {
inline
             
mpd_isqnan should always be inlined (cost=always) 
mpd_qmin_mag
inline
             
mpd_isqnan inlined into mpd_qmin_mag 
mpd_qmin_mag
inline
                               
mpd_isnan should always be inlined (cost=always) 
mpd_qmin_mag
inline
                               
mpd_isnan inlined into mpd_qmin_mag 
mpd_qmin_mag
5223
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into mpd_qmin_mag with cost=215 (threshold=250) 
mpd_qmin_mag
inline
        
mpd_qcopy inlined into mpd_qmin_mag 
mpd_qmin_mag
5224
    }
5225
    else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
             
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qmin_mag
inline
             
mpd_qcheck_nans will not be inlined into mpd_qmin_mag 
mpd_qmin_mag
5226
        return;
5227
    }
5228
    else {
5229
        c = _mpd_cmp_abs(a, b);
inline
            
_mpd_cmp_abs too costly to inline (cost=360, threshold=250) 
mpd_qmin_mag
inline
            
_mpd_cmp_abs will not be inlined into mpd_qmin_mag 
mpd_qmin_mag
5230
        if (c == 0) {
5231
            c = _mpd_cmp_numequal(a, b);
inline
                
_mpd_cmp_numequal can be inlined into mpd_qmin_mag with cost=-14950 (threshold=325) 
mpd_qmin_mag
inline
                
_mpd_cmp_numequal inlined into mpd_qmin_mag 
mpd_qmin_mag
5232
        }
5233
5234
        if (c < 0) {
5235
            mpd_qcopy(result, a, status);
inline
            
mpd_qcopy can be inlined into mpd_qmin_mag with cost=215 (threshold=250) 
mpd_qmin_mag
inline
            
mpd_qcopy inlined into mpd_qmin_mag 
mpd_qmin_mag
5236
        }
5237
        else {
5238
            mpd_qcopy(result, b, status);
inline
            
mpd_qcopy can be inlined into mpd_qmin_mag with cost=215 (threshold=250) 
mpd_qmin_mag
inline
            
mpd_qcopy inlined into mpd_qmin_mag 
mpd_qmin_mag
5239
        }
5240
    }
5241
5242
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qmin_mag
inline
    
mpd_qfinalize will not be inlined into mpd_qmin_mag 
mpd_qmin_mag
5243
}
5244
5245
/* Minimum space needed for the result array in _karatsuba_rec(). */
5246
static inline mpd_size_t
5247
_kmul_resultsize(mpd_size_t la, mpd_size_t lb)
5248
{
5249
    mpd_size_t n, m;
5250
5251
    n = add_size_t(la, lb);
inline
        
add_size_t can be inlined into _kmul_resultsize with cost=165 (threshold=325) 
_kmul_resultsize
inline
        
add_size_t inlined into _kmul_resultsize 
_kmul_resultsize
5252
    n = add_size_t(n, 1);
inline
        
Not inlining. Cost of inlining add_size_t increases the cost of inlining _kmul_resultsize in other contexts 
_kmul_resultsize
inline
        
add_size_t will not be inlined into _kmul_resultsize 
_kmul_resultsize
inline
        
add_size_t can be inlined into _mpd_kmul with cost=160 (threshold=325) 
_mpd_kmul
inline
        
add_size_t inlined into _mpd_kmul 
_mpd_kmul
inline
        
add_size_t can be inlined into _mpd_kmul_fnt with cost=-14840 (threshold=325) 
_mpd_kmul_fnt
inline
        
add_size_t inlined into _mpd_kmul_fnt 
_mpd_kmul_fnt
5253
5254
    m = (la+1)/2 + 1;
5255
    m = mul_size_t(m, 3);
inline
        
Not inlining. Cost of inlining mul_size_t increases the cost of inlining _kmul_resultsize in other contexts 
_kmul_resultsize
inline
        
mul_size_t will not be inlined into _kmul_resultsize 
_kmul_resultsize
inline
        
mul_size_t can be inlined into _mpd_kmul with cost=180 (threshold=325) 
_mpd_kmul
inline
        
mul_size_t inlined into _mpd_kmul 
_mpd_kmul
inline
        
mul_size_t can be inlined into _mpd_kmul_fnt with cost=-14820 (threshold=325) 
_mpd_kmul_fnt
inline
        
mul_size_t inlined into _mpd_kmul_fnt 
_mpd_kmul_fnt
5256
5257
    return (m > n) ? m : n;
5258
}
5259
5260
/* Work space needed in _karatsuba_rec(). lim >= 4 */
5261
static inline mpd_size_t
5262
_kmul_worksize(mpd_size_t n, mpd_size_t lim)
5263
{
5264
    mpd_size_t m;
5265
5266
    if (n <= lim) {
5267
        return 0;
5268
    }
5269
5270
    m = (n+1)/2 + 1;
5271
5272
    return add_size_t(mul_size_t(m, 2), _kmul_worksize(m, lim));
inline
                      
mul_size_t can be inlined into _kmul_worksize with cost=180 (threshold=325) 
_kmul_worksize
inline
                      
mul_size_t inlined into _kmul_worksize 
_kmul_worksize
inline
           
add_size_t can be inlined into _kmul_worksize with cost=165 (threshold=325) 
_kmul_worksize
inline
           
add_size_t inlined into _kmul_worksize 
_kmul_worksize
inline
                                        
_kmul_worksize should never be inlined (cost=never) 
_kmul_worksize
inline
                                        
_kmul_worksize will not be inlined into _kmul_worksize 
_kmul_worksize
5273
}
5274
5275
5276
#define MPD_KARATSUBA_BASECASE 16  /* must be >= 4 */
5277
5278
/*
5279
 * Add the product of a and b to c.
5280
 * c must be _kmul_resultsize(la, lb) in size.
5281
 * w is used as a work array and must be _kmul_worksize(a, lim) in size.
5282
 * Roman E. Maeder, Storage Allocation for the Karatsuba Integer Multiplication
5283
 * Algorithm. In "Design and implementation of symbolic computation systems",
5284
 * Springer, 1993, ISBN 354057235X, 9783540572350.
5285
 */
5286
static void
_karatsuba_rec(mpd_uint_t *c, const mpd_uint_t *a, const mpd_uint_t *b,
               mpd_uint_t *w, mpd_size_t la, mpd_size_t lb)
{
    mpd_size_t m, lt;

    assert(la >= lb && lb > 0);
    assert(la <= MPD_KARATSUBA_BASECASE || w != NULL);

    /* Small operands: schoolbook multiplication, no scratch space needed. */
    if (la <= MPD_KARATSUBA_BASECASE) {
        _mpd_basemul(c, a, b, la, lb);
        return;
    }

    m = (la+1)/2;  /* ceil(la/2) */

    /* lb <= m < la */
    if (lb <= m) {
        /* Unbalanced case: split only a into al (low m limbs) and
         * ah (high la-m limbs); multiply each part by the whole of b. */

        /* lb can now be larger than la-m */
        if (lb > la-m) {
            lt = lb + lb + 1;       /* space needed for result array */
            mpd_uint_zero(w, lt);   /* clear result array */
            _karatsuba_rec(w, b, a+m, w+lt, lb, la-m); /* b*ah */
        }
        else {
            lt = (la-m) + (la-m) + 1;  /* space needed for result array */
            mpd_uint_zero(w, lt);      /* clear result array */
            _karatsuba_rec(w, a+m, b, w+lt, la-m, lb); /* ah*b */
        }
        _mpd_baseaddto(c+m, w, (la-m)+lb);      /* add ah*b*B**m */

        lt = m + m + 1;         /* space needed for the result array */
        mpd_uint_zero(w, lt);   /* clear result array */
        _karatsuba_rec(w, a, b, w+lt, m, lb);  /* al*b */
        _mpd_baseaddto(c, w, m+lb);    /* add al*b */

        return;
    }

    /* la >= lb > m */
    /* Balanced Karatsuba: a = ah*B**m + al, b = bh*B**m + bl. */

    /* w[0..m] = al + ah  (m+1 limbs; w[m] holds the carry) */
    memcpy(w, a, m * sizeof *w);
    w[m] = 0;
    _mpd_baseaddto(w, a+m, la-m);

    /* w[m+1..2m+1] = bl + bh */
    memcpy(w+(m+1), b, m * sizeof *w);
    w[m+1+m] = 0;
    _mpd_baseaddto(w+(m+1), b+m, lb-m);

    /* c += (al+ah)*(bl+bh)*B**m; recursion scratch starts past both sums */
    _karatsuba_rec(c+m, w, w+(m+1), w+2*(m+1), m+1, m+1);

    /* w = ah*bh */
    lt = (la-m) + (la-m) + 1;
    mpd_uint_zero(w, lt);

    _karatsuba_rec(w, a+m, b+m, w+lt, la-m, lb-m);

    /* c += ah*bh*B**2m; c -= ah*bh*B**m (correct the middle term) */
    _mpd_baseaddto(c+2*m, w, (la-m) + (lb-m));
    _mpd_basesubfrom(c+m, w, (la-m) + (lb-m));

    /* w = al*bl */
    lt = m + m + 1;
    mpd_uint_zero(w, lt);

    _karatsuba_rec(w, a, b, w+lt, m, m);
    /* c += al*bl; c -= al*bl*B**m (correct the middle term) */
    _mpd_baseaddto(c, w, m+m);
    _mpd_basesubfrom(c+m, w, m+m);

    return;
}
5354
5355
/*
5356
 * Multiply u and v, using Karatsuba multiplication. Returns a pointer
5357
 * to the result or NULL in case of failure (malloc error).
5358
 * Conditions: ulen >= vlen, ulen >= 4
5359
 */
5360
static mpd_uint_t *
5361
_mpd_kmul(const mpd_uint_t *u, const mpd_uint_t *v,
5362
          mpd_size_t ulen, mpd_size_t vlen,
5363
          mpd_size_t *rsize)
5364
{
5365
    mpd_uint_t *result = NULL, *w = NULL;
5366
    mpd_size_t m;
5367
5368
    assert(ulen >= 4);
5369
    assert(ulen >= vlen);
5370
5371
    *rsize = _kmul_resultsize(ulen, vlen);
inline
             
_kmul_resultsize can be inlined into _mpd_kmul with cost=270 (threshold=325) 
_mpd_kmul
inline
             
_kmul_resultsize inlined into _mpd_kmul 
_mpd_kmul
5372
    if ((result = mpd_calloc(*rsize, sizeof *result)) == NULL) {
inline
                  
mpd_calloc will not be inlined into _mpd_kmul because its definition is unavailable 
_mpd_kmul
5373
        return NULL;
5374
    }
5375
5376
    m = _kmul_worksize(ulen, MPD_KARATSUBA_BASECASE);
inline
        
_kmul_worksize should never be inlined (cost=never) 
_mpd_kmul
inline
        
_kmul_worksize will not be inlined into _mpd_kmul 
_mpd_kmul
inline
        
_kmul_worksize should never be inlined (cost=never) 
_mpd_qmul
inline
        
_kmul_worksize will not be inlined into _mpd_qmul 
_mpd_qmul
5377
    if (m && ((w = mpd_calloc(m, sizeof *w)) == NULL)) {
inline
                   
mpd_calloc will not be inlined into _mpd_kmul because its definition is unavailable 
_mpd_kmul
5378
        mpd_free(result);
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_kmul
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qmul
5379
        return NULL;
5380
    }
5381
5382
    _karatsuba_rec(result, u, v, w, ulen, vlen);
inline
    
_karatsuba_rec should never be inlined (cost=never) 
_mpd_kmul
inline
    
_karatsuba_rec will not be inlined into _mpd_kmul 
_mpd_kmul
inline
    
_karatsuba_rec should never be inlined (cost=never) 
_mpd_qmul
inline
    
_karatsuba_rec will not be inlined into _mpd_qmul 
_mpd_qmul
5383
5384
5385
    if (w) mpd_free(w);
gvn
           
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_kmul
gvn
           
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qmul
5386
    return result;
5387
}
5388
5389
5390
/*
5391
 * Determine the minimum length for the number theoretic transform. Valid
5392
 * transform lengths are 2**n or 3*2**n, where 2**n <= MPD_MAXTRANSFORM_2N.
5393
 * The function finds the shortest length m such that rsize <= m.
5394
 */
5395
static inline mpd_size_t
_mpd_get_transform_len(mpd_size_t rsize)
{
    mpd_size_t log2rsize;
    mpd_size_t x, step;

    assert(rsize >= 4);
    /* Position of the highest set bit, used below as floor(log2(rsize)). */
    log2rsize = mpd_bsr(rsize);

    if (rsize <= 1024) {
        /* 2**n is faster in this range. */
        x = ((mpd_size_t)1)<<log2rsize;
        /* Round up to the next power of two unless rsize already is one. */
        return (rsize == x) ? x : x<<1;
    }
    else if (rsize <= MPD_MAXTRANSFORM_2N) {
        /* Candidates are 2**n and 3*2**(n-1): round up to the nearest. */
        x = ((mpd_size_t)1)<<log2rsize;
        if (rsize == x) return x;
        step = x>>1;
        x += step;               /* x is now 3*2**(log2rsize-1) */
        return (rsize <= x) ? x : x + step;
    }
    else if (rsize <= MPD_MAXTRANSFORM_2N+MPD_MAXTRANSFORM_2N/2) {
        /* Largest 3*2**n form that still satisfies 2**n <= MPD_MAXTRANSFORM_2N/2. */
        return MPD_MAXTRANSFORM_2N+MPD_MAXTRANSFORM_2N/2;
    }
    else if (rsize <= 3*MPD_MAXTRANSFORM_2N) {
        /* Largest valid transform length overall. */
        return 3*MPD_MAXTRANSFORM_2N;
    }
    else {
        /* rsize exceeds every valid transform length: signal failure. */
        return MPD_SIZE_MAX;
    }
}
5426
5427
#ifdef PPRO
5428
#ifndef _MSC_VER
5429
/* Read the x87 FPU control word (fnstcw). */
static inline unsigned short
_mpd_get_control87(void)
{
    unsigned short cw;

    __asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
    return cw;
}
5437
5438
/* Load a new x87 FPU control word (fldcw). */
static inline void
_mpd_set_control87(unsigned short cw)
{
    __asm__ __volatile__ ("fldcw %0" : : "m" (cw));
}
5443
#endif
5444
5445
/*
 * Put the x87 FPU into the mode required by the PPRO transform:
 * all exceptions masked, 64-bit (extended) precision, round toward zero.
 * Returns the previous control word so mpd_restore_fenv() can undo this.
 */
static unsigned int
mpd_set_fenv(void)
{
    unsigned int cw;
#ifdef _MSC_VER
    unsigned int flags =
        _EM_INVALID|_EM_DENORMAL|_EM_ZERODIVIDE|_EM_OVERFLOW|
        _EM_UNDERFLOW|_EM_INEXACT|_RC_CHOP|_PC_64;
    unsigned int mask = _MCW_EM|_MCW_RC|_MCW_PC;
    unsigned int dummy;

    /* First call only queries the current control word into cw. */
    __control87_2(0, 0, &cw, NULL);
    __control87_2(flags, mask, &dummy, NULL);
#else
    cw = _mpd_get_control87();
    /* 0xF3F: mask all exceptions, PC=extended precision, RC=truncate. */
    _mpd_set_control87(cw|0xF3F);
#endif
    return cw;
}
5464
5465
/* Restore the x87 FPU control word previously saved by mpd_set_fenv(). */
static void
mpd_restore_fenv(unsigned int cw)
{
#ifdef _MSC_VER
    unsigned int mask = _MCW_EM|_MCW_RC|_MCW_PC;
    unsigned int dummy;

    __control87_2(cw, mask, &dummy, NULL);
#else
    _mpd_set_control87((unsigned short)cw);
#endif
}
5477
#endif /* PPRO */
5478
5479
/*
 * Multiply u and v, using the fast number theoretic transform. The
 * product is computed modulo three primes (P1, P2, P3) and recombined
 * with the Chinese Remainder Theorem (crt3). Sets *rsize to ulen+vlen.
 * Returns a pointer to the result or NULL in case of failure (malloc
 * error).
 */
static mpd_uint_t *
_mpd_fntmul(const mpd_uint_t *u, const mpd_uint_t *v,
            mpd_size_t ulen, mpd_size_t vlen,
            mpd_size_t *rsize)
{
    mpd_uint_t *c1 = NULL, *c2 = NULL, *c3 = NULL, *vtmp = NULL;
    mpd_size_t n;

#ifdef PPRO
    /* Save the FPU state and switch to the settings the PPRO transform
       relies on; restored on every exit path via the 'out' label. */
    unsigned int cw;
    cw = mpd_set_fenv();
#endif

    *rsize = add_size_t(ulen, vlen);
    /* Transform length must accommodate the full product. */
    if ((n = _mpd_get_transform_len(*rsize)) == MPD_SIZE_MAX) {
        goto malloc_error;
    }

    /* One zeroed coefficient array per prime modulus. */
    if ((c1 = mpd_calloc(n, sizeof *c1)) == NULL) {
        goto malloc_error;
    }
    if ((c2 = mpd_calloc(n, sizeof *c2)) == NULL) {
        goto malloc_error;
    }
    if ((c3 = mpd_calloc(n, sizeof *c3)) == NULL) {
        goto malloc_error;
    }

    /* Each array starts as a copy of u (tail already zero from calloc). */
    memcpy(c1, u, ulen * (sizeof *c1));
    memcpy(c2, u, ulen * (sizeof *c2));
    memcpy(c3, u, ulen * (sizeof *c3));

    if (u == v) {
        /* Squaring: autoconvolution avoids a second operand buffer. */
        if (!fnt_autoconvolute(c1, n, P1) ||
            !fnt_autoconvolute(c2, n, P2) ||
            !fnt_autoconvolute(c3, n, P3)) {
            goto malloc_error;
        }
    }
    else {
        if ((vtmp = mpd_calloc(n, sizeof *vtmp)) == NULL) {
            goto malloc_error;
        }

        /* First pass: vtmp's tail is already zero from calloc. */
        memcpy(vtmp, v, vlen * (sizeof *vtmp));
        if (!fnt_convolute(c1, vtmp, n, P1)) {
            mpd_free(vtmp);
            goto malloc_error;
        }

        /* fnt_convolute clobbered vtmp, so refill AND re-zero the tail. */
        memcpy(vtmp, v, vlen * (sizeof *vtmp));
        mpd_uint_zero(vtmp+vlen, n-vlen);
        if (!fnt_convolute(c2, vtmp, n, P2)) {
            mpd_free(vtmp);
            goto malloc_error;
        }

        memcpy(vtmp, v, vlen * (sizeof *vtmp));
        mpd_uint_zero(vtmp+vlen, n-vlen);
        if (!fnt_convolute(c3, vtmp, n, P3)) {
            mpd_free(vtmp);
            goto malloc_error;
        }

        mpd_free(vtmp);
    }

    /* Recombine the three residue products into c1 via the CRT. */
    crt3(c1, c2, c3, *rsize);

out:
#ifdef PPRO
    mpd_restore_fenv(cw);
#endif
    /* c1 holds the result (or NULL on error); c2/c3 are scratch. */
    if (c2) mpd_free(c2);
    if (c3) mpd_free(c3);
    return c1;

malloc_error:
    if (c1) mpd_free(c1);
    c1 = NULL;
    goto out;
}
5565
5566
5567
/*
 * Karatsuba multiplication with FNT/basemul as the base case.
 * Adds a*b into c (c must be zeroed by the caller over the base-case
 * region); w is caller-provided scratch space, required whenever
 * la > 3*(MPD_MAXTRANSFORM_2N/2). Returns 1 on success, 0 on malloc
 * failure inside the FNT base case.
 */
static int
_karatsuba_rec_fnt(mpd_uint_t *c, const mpd_uint_t *a, const mpd_uint_t *b,
                   mpd_uint_t *w, mpd_size_t la, mpd_size_t lb)
{
    mpd_size_t m, lt;

    assert(la >= lb && lb > 0);
    assert(la <= 3*(MPD_MAXTRANSFORM_2N/2) || w != NULL);

    /* Base case: operand fits a single FNT (or is small enough for
       schoolbook multiplication). */
    if (la <= 3*(MPD_MAXTRANSFORM_2N/2)) {

        if (lb <= 192) {
            _mpd_basemul(c, b, a, lb, la);
        }
        else {
            mpd_uint_t *result;
            mpd_size_t dummy;

            if ((result = _mpd_fntmul(a, b, la, lb, &dummy)) == NULL) {
                return 0;
            }
            memcpy(c, result, (la+lb) * (sizeof *result));
            mpd_free(result);
        }
        return 1;
    }

    m = (la+1)/2;  /* ceil(la/2) */

    /* Unbalanced case: b fits entirely in the low half of a.
       Split a = ah*B**m + al and compute b*ah and b*al separately. */
    /* lb <= m < la */
    if (lb <= m) {

        /* lb can now be larger than la-m */
        if (lb > la-m) {
            lt = lb + lb + 1;       /* space needed for result array */
            mpd_uint_zero(w, lt);   /* clear result array */
            if (!_karatsuba_rec_fnt(w, b, a+m, w+lt, lb, la-m)) { /* b*ah */
                return 0; /* GCOV_UNLIKELY */
            }
        }
        else {
            lt = (la-m) + (la-m) + 1;  /* space needed for result array */
            mpd_uint_zero(w, lt);      /* clear result array */
            if (!_karatsuba_rec_fnt(w, a+m, b, w+lt, la-m, lb)) { /* ah*b */
                return 0; /* GCOV_UNLIKELY */
            }
        }
        _mpd_baseaddto(c+m, w, (la-m)+lb); /* add ah*b*B**m */

        lt = m + m + 1;         /* space needed for the result array */
        mpd_uint_zero(w, lt);   /* clear result array */
        if (!_karatsuba_rec_fnt(w, a, b, w+lt, m, lb)) {  /* al*b */
            return 0; /* GCOV_UNLIKELY */
        }
        _mpd_baseaddto(c, w, m+lb);       /* add al*b */

        return 1;
    }

    /* Balanced Karatsuba: a = ah*B**m + al, b = bh*B**m + bl.
       Compute (al+ah)*(bl+bh), ah*bh and al*bl, then combine. */
    /* la >= lb > m */
    memcpy(w, a, m * sizeof *w);
    w[m] = 0;
    _mpd_baseaddto(w, a+m, la-m);       /* w[0..m] = al + ah */

    memcpy(w+(m+1), b, m * sizeof *w);
    w[m+1+m] = 0;
    _mpd_baseaddto(w+(m+1), b+m, lb-m); /* w[m+1..2m+1] = bl + bh */

    /* (al+ah)*(bl+bh) goes directly into the middle of c. */
    if (!_karatsuba_rec_fnt(c+m, w, w+(m+1), w+2*(m+1), m+1, m+1)) {
        return 0; /* GCOV_UNLIKELY */
    }

    lt = (la-m) + (la-m) + 1;
    mpd_uint_zero(w, lt);

    if (!_karatsuba_rec_fnt(w, a+m, b+m, w+lt, la-m, lb-m)) {  /* ah*bh */
        return 0; /* GCOV_UNLIKELY */
    }

    /* Add ah*bh*B**2m and subtract ah*bh*B**m. */
    _mpd_baseaddto(c+2*m, w, (la-m) + (lb-m));
    _mpd_basesubfrom(c+m, w, (la-m) + (lb-m));

    lt = m + m + 1;
    mpd_uint_zero(w, lt);

    if (!_karatsuba_rec_fnt(w, a, b, w+lt, m, m)) {  /* al*bl */
        return 0; /* GCOV_UNLIKELY */
    }
    /* Add al*bl and subtract al*bl*B**m. */
    _mpd_baseaddto(c, w, m+m);
    _mpd_basesubfrom(c+m, w, m+m);

    return 1;
}
5663
5664
/*
5665
 * Multiply u and v, using Karatsuba multiplication with the FNT as the
5666
 * base case. Returns a pointer to the result or NULL in case of failure
5667
 * (malloc error). Conditions: ulen >= vlen, ulen >= 4.
5668
 */
5669
static mpd_uint_t *
5670
_mpd_kmul_fnt(const mpd_uint_t *u, const mpd_uint_t *v,
5671
              mpd_size_t ulen, mpd_size_t vlen,
5672
              mpd_size_t *rsize)
5673
{
5674
    mpd_uint_t *result = NULL, *w = NULL;
5675
    mpd_size_t m;
5676
5677
    assert(ulen >= 4);
5678
    assert(ulen >= vlen);
5679
5680
    *rsize = _kmul_resultsize(ulen, vlen);
inline
             
_kmul_resultsize can be inlined into _mpd_kmul_fnt with cost=-14730 (threshold=325) 
_mpd_kmul_fnt
inline
             
_kmul_resultsize inlined into _mpd_kmul_fnt 
_mpd_kmul_fnt
5681
    if ((result = mpd_calloc(*rsize, sizeof *result)) == NULL) {
inline
                  
mpd_calloc will not be inlined into _mpd_kmul_fnt because its definition is unavailable 
_mpd_kmul_fnt
5682
        return NULL;
5683
    }
5684
5685
    m = _kmul_worksize(ulen, 3*(MPD_MAXTRANSFORM_2N/2));
inline
        
_kmul_worksize should never be inlined (cost=never) 
_mpd_kmul_fnt
inline
        
_kmul_worksize will not be inlined into _mpd_kmul_fnt 
_mpd_kmul_fnt
inline
        
_kmul_worksize should never be inlined (cost=never) 
_mpd_qmul
inline
        
_kmul_worksize will not be inlined into _mpd_qmul 
_mpd_qmul
5686
    if (m && ((w = mpd_calloc(m, sizeof *w)) == NULL)) {
inline
                   
mpd_calloc will not be inlined into _mpd_kmul_fnt because its definition is unavailable 
_mpd_kmul_fnt
5687
        mpd_free(result); /* GCOV_UNLIKELY */
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_kmul_fnt
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qmul
5688
        return NULL; /* GCOV_UNLIKELY */
5689
    }
5690
5691
    if (!_karatsuba_rec_fnt(result, u, v, w, ulen, vlen)) {
inline
         
_karatsuba_rec_fnt should never be inlined (cost=never) 
_mpd_kmul_fnt
inline
         
_karatsuba_rec_fnt will not be inlined into _mpd_kmul_fnt 
_mpd_kmul_fnt
inline
         
_karatsuba_rec_fnt should never be inlined (cost=never) 
_mpd_qmul
inline
         
_karatsuba_rec_fnt will not be inlined into _mpd_qmul 
_mpd_qmul
5692
        mpd_free(result);
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_kmul_fnt
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qmul
5693
        result = NULL;
5694
    }
5695
5696
5697
    if (w) mpd_free(w);
gvn
           
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_kmul_fnt
gvn
           
load of type void (i8*)* not eliminated because it is clobbered by call 
_mpd_qmul
5698
    return result;
5699
}
5700
5701
5702
/* Deal with the special cases of multiplying infinities. */
5703
static void
5704
_mpd_qmul_inf(mpd_t *result, const mpd_t *a, const mpd_t *b, uint32_t *status)
5705
{
5706
    if (mpd_isinfinite(a)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
_mpd_qmul_inf
inline
        
mpd_isinfinite inlined into _mpd_qmul_inf 
_mpd_qmul_inf
5707
        if (mpd_iszero(b)) {
inline
            
mpd_iszero should always be inlined (cost=always) 
_mpd_qmul_inf
inline
            
mpd_iszero inlined into _mpd_qmul_inf 
_mpd_qmul_inf
5708
            mpd_seterror(result, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into _mpd_qmul_inf with cost=130 (threshold=250) 
_mpd_qmul_inf
inline
            
mpd_seterror inlined into _mpd_qmul_inf 
_mpd_qmul_inf
5709
        }
5710
        else {
5711
            mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
inline
                                   
mpd_sign should always be inlined (cost=always) 
_mpd_qmul_inf
inline
                                   
mpd_sign inlined into _mpd_qmul_inf 
_mpd_qmul_inf
inline
            
mpd_setspecial can be inlined into _mpd_qmul_inf with cost=120 (threshold=250) 
_mpd_qmul_inf
inline
            
mpd_setspecial inlined into _mpd_qmul_inf 
_mpd_qmul_inf
inline
                                               
mpd_sign should always be inlined (cost=always) 
_mpd_qmul_inf
inline
                                               
mpd_sign inlined into _mpd_qmul_inf 
_mpd_qmul_inf
5712
        }
5713
        return;
5714
    }
5715
    assert(mpd_isinfinite(b));
5716
    if (mpd_iszero(a)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
_mpd_qmul_inf
inline
        
mpd_iszero inlined into _mpd_qmul_inf 
_mpd_qmul_inf
5717
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into _mpd_qmul_inf with cost=130 (threshold=250) 
_mpd_qmul_inf
inline
        
mpd_seterror inlined into _mpd_qmul_inf 
_mpd_qmul_inf
5718
    }
5719
    else {
5720
        mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
inline
        
mpd_setspecial can be inlined into _mpd_qmul_inf with cost=120 (threshold=250) 
_mpd_qmul_inf
inline
        
mpd_setspecial inlined into _mpd_qmul_inf 
_mpd_qmul_inf
inline
                                           
mpd_sign should always be inlined (cost=always) 
_mpd_qmul_inf
inline
                                           
mpd_sign inlined into _mpd_qmul_inf 
_mpd_qmul_inf
inline
                               
mpd_sign should always be inlined (cost=always) 
_mpd_qmul_inf
inline
                               
mpd_sign inlined into _mpd_qmul_inf 
_mpd_qmul_inf
5721
    }
5722
}
5723
5724
/*
 * Internal function: Multiply a and b. _mpd_qmul deals with specials but
 * does NOT finalize the result. This is for use in mpd_fma().
 *
 * Dispatches on operand size: single-word, fixed-buffer, schoolbook,
 * Karatsuba, FNT, or Karatsuba-over-FNT. On malloc failure sets
 * MPD_Malloc_error via mpd_seterror.
 */
static inline void
_mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
          const mpd_context_t *ctx, uint32_t *status)
{
    const mpd_t *big = a, *small = b;
    mpd_uint_t *rdata = NULL;
    mpd_uint_t rbuf[MPD_MINALLOC_MAX];  /* stack buffer for tiny products */
    mpd_size_t rsize, i;


    /* NaNs and infinities are handled up front; the rest of the
       function assumes finite operands. */
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
            return;
        }
        _mpd_qmul_inf(result, a, b, status);
        return;
    }

    /* Ensure big->len >= small->len (pointer swap only). */
    if (small->len > big->len) {
        _mpd_ptrswap(&big, &small);
    }

    /* Upper bound for the number of words of the product. */
    rsize = big->len + small->len;

    /* Both operands are single words. */
    if (big->len == 1) {
        _mpd_singlemul(result->data, big->data[0], small->data[0]);
        goto finish;
    }
    /* Product fits the stack buffer: multiply into rbuf, then copy. */
    if (rsize <= (mpd_size_t)MPD_MINALLOC_MAX) {
        if (big->len == 2) {
            _mpd_mul_2_le2(rbuf, big->data, small->data, small->len);
        }
        else {
            mpd_uint_zero(rbuf, rsize);
            if (small->len == 1) {
                _mpd_shortmul(rbuf, big->data, big->len, small->data[0]);
            }
            else {
                _mpd_basemul(rbuf, small->data, big->data, small->len, big->len);
            }
        }
        if (!mpd_qresize(result, rsize, status)) {
            return;
        }
        for(i = 0; i < rsize; i++) {
            result->data[i] = rbuf[i];
        }
        goto finish;
    }


    /* Heap-allocated result: pick an algorithm by operand size. */
    if (small->len <= 256) {
        rdata = mpd_calloc(rsize, sizeof *rdata);
        if (rdata != NULL) {
            if (small->len == 1) {
                _mpd_shortmul(rdata, big->data, big->len, small->data[0]);
            }
            else {
                _mpd_basemul(rdata, small->data, big->data, small->len, big->len);
            }
        }
    }
    else if (rsize <= 1024) {
        rdata = _mpd_kmul(big->data, small->data, big->len, small->len, &rsize);
    }
    else if (rsize <= 3*MPD_MAXTRANSFORM_2N) {
        rdata = _mpd_fntmul(big->data, small->data, big->len, small->len, &rsize);
    }
    else {
        rdata = _mpd_kmul_fnt(big->data, small->data, big->len, small->len, &rsize);
    }

    if (rdata == NULL) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return;
    }

    /* Transfer ownership of rdata to result, freeing any previous
       dynamically allocated coefficient array. */
    if (mpd_isdynamic_data(result)) {
        mpd_free(result->data);
    }
    result->data = rdata;
    result->alloc = rsize;
    mpd_set_dynamic_data(result);


finish:
    /* Sign of the product is the XOR of the operand signs. */
    mpd_set_flags(result, mpd_sign(a)^mpd_sign(b));
    result->exp = big->exp + small->exp;
    /* Drop leading zero words from the upper bound rsize. */
    result->len = _mpd_real_size(result->data, rsize);
    /* resize to smaller cannot fail */
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
}
5821
5822
/* Multiply a and b, rounding the result to the context. */
void
mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
         const mpd_context_t *ctx, uint32_t *status)
{
    /* Full-precision product (specials handled, result not finalized). */
    _mpd_qmul(result, a, b, ctx, status);
    /* Apply context precision, rounding and exponent limits. */
    mpd_qfinalize(result, ctx, status);
}
5830
5831
/* Multiply a and b. Set NaN/Invalid_operation if the result is inexact. */
5832
static void
5833
_mpd_qmul_exact(mpd_t *result, const mpd_t *a, const mpd_t *b,
5834
                const mpd_context_t *ctx, uint32_t *status)
5835
{
5836
    uint32_t workstatus = 0;
licm
    
hosting bitcast 
_mpd_base_ndivmod
5837
5838
    mpd_qmul(result, a, b, ctx, &workstatus);
inline
    
mpd_qmul can be inlined into _mpd_qmul_exact with cost=45 (threshold=375) 
_mpd_qmul_exact
inline
    
mpd_qmul inlined into _mpd_qmul_exact 
_mpd_qmul_exact
5839
    *status |= workstatus;
gvn
               
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qmul_exact
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qmul_exact
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
               
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_base_ndivmod
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
5840
    if (workstatus & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
5841
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into _mpd_qmul_exact with cost=130 (threshold=250) 
_mpd_qmul_exact
inline
        
mpd_seterror inlined into _mpd_qmul_exact 
_mpd_qmul_exact
5842
    }
5843
}
5844
5845
/* Multiply decimal and mpd_ssize_t. */
5846
void
5847
mpd_qmul_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
5848
               const mpd_context_t *ctx, uint32_t *status)
5849
{
5850
    mpd_context_t maxcontext;
5851
    MPD_NEW_STATIC(bb,0,0,0,0);
5852
5853
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qmul_ssize because its definition is unavailable 
mpd_qmul_ssize
5854
    mpd_qsset_ssize(&bb, b, &maxcontext, status);
inline
    
mpd_qsset_ssize can be inlined into mpd_qmul_ssize with cost=65 (threshold=375) 
mpd_qmul_ssize
inline
    
mpd_qsset_ssize inlined into mpd_qmul_ssize 
mpd_qmul_ssize
5855
    mpd_qmul(result, a, &bb, ctx, status);
inline
    
mpd_qmul can be inlined into mpd_qmul_ssize with cost=45 (threshold=375) 
mpd_qmul_ssize
inline
    
mpd_qmul inlined into mpd_qmul_ssize 
mpd_qmul_ssize
5856
    mpd_del(&bb);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qmul_ssize
inline
    
mpd_del inlined into mpd_qmul_ssize 
mpd_qmul_ssize
5857
}
5858
5859
/* Multiply decimal and mpd_uint_t. */
5860
void
5861
mpd_qmul_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
5862
              const mpd_context_t *ctx, uint32_t *status)
5863
{
5864
    mpd_context_t maxcontext;
5865
    MPD_NEW_STATIC(bb,0,0,0,0);
5866
5867
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qmul_uint because its definition is unavailable 
mpd_qmul_uint
5868
    mpd_qsset_uint(&bb, b, &maxcontext, status);
inline
    
mpd_qsset_uint can be inlined into mpd_qmul_uint with cost=45 (threshold=375) 
mpd_qmul_uint
inline
    
mpd_qsset_uint inlined into mpd_qmul_uint 
mpd_qmul_uint
5869
    mpd_qmul(result, a, &bb, ctx, status);
inline
    
mpd_qmul can be inlined into mpd_qmul_uint with cost=45 (threshold=375) 
mpd_qmul_uint
inline
    
mpd_qmul inlined into mpd_qmul_uint 
mpd_qmul_uint
5870
    mpd_del(&bb);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qmul_uint
inline
    
mpd_del inlined into mpd_qmul_uint 
mpd_qmul_uint
5871
}
5872
5873
void
5874
mpd_qmul_i32(mpd_t *result, const mpd_t *a, int32_t b,
5875
             const mpd_context_t *ctx, uint32_t *status)
5876
{
5877
    mpd_qmul_ssize(result, a, b, ctx, status);
inline
    
mpd_qmul_ssize too costly to inline (cost=300, threshold=250) 
mpd_qmul_i32
inline
    
mpd_qmul_ssize will not be inlined into mpd_qmul_i32 
mpd_qmul_i32
5878
}
5879
5880
void
5881
mpd_qmul_u32(mpd_t *result, const mpd_t *a, uint32_t b,
5882
             const mpd_context_t *ctx, uint32_t *status)
5883
{
5884
    mpd_qmul_uint(result, a, b, ctx, status);
inline
    
mpd_qmul_uint too costly to inline (cost=280, threshold=250) 
mpd_qmul_u32
inline
    
mpd_qmul_uint will not be inlined into mpd_qmul_u32 
mpd_qmul_u32
5885
}
5886
5887
#ifdef CONFIG_64
5888
void
5889
mpd_qmul_i64(mpd_t *result, const mpd_t *a, int64_t b,
5890
             const mpd_context_t *ctx, uint32_t *status)
5891
{
5892
    mpd_qmul_ssize(result, a, b, ctx, status);
inline
    
mpd_qmul_ssize too costly to inline (cost=300, threshold=250) 
mpd_qmul_i64
inline
    
mpd_qmul_ssize will not be inlined into mpd_qmul_i64 
mpd_qmul_i64
5893
}
5894
5895
void
5896
mpd_qmul_u64(mpd_t *result, const mpd_t *a, uint64_t b,
5897
             const mpd_context_t *ctx, uint32_t *status)
5898
{
5899
    mpd_qmul_uint(result, a, b, ctx, status);
inline
    
mpd_qmul_uint too costly to inline (cost=280, threshold=250) 
mpd_qmul_u64
inline
    
mpd_qmul_uint will not be inlined into mpd_qmul_u64 
mpd_qmul_u64
5900
}
5901
#elif !defined(LEGACY_COMPILER)
5902
/* Multiply decimal and int64_t. */
5903
void
5904
mpd_qmul_i64(mpd_t *result, const mpd_t *a, int64_t b,
5905
             const mpd_context_t *ctx, uint32_t *status)
5906
{
5907
    mpd_context_t maxcontext;
5908
    MPD_NEW_STATIC(bb,0,0,0,0);
5909
5910
    mpd_maxcontext(&maxcontext);
5911
    mpd_qset_i64(&bb, b, &maxcontext, status);
5912
    mpd_qmul(result, a, &bb, ctx, status);
5913
    mpd_del(&bb);
5914
}
5915
5916
/* Multiply decimal and uint64_t. */
5917
void
5918
mpd_qmul_u64(mpd_t *result, const mpd_t *a, uint64_t b,
5919
             const mpd_context_t *ctx, uint32_t *status)
5920
{
5921
    mpd_context_t maxcontext;
5922
    MPD_NEW_STATIC(bb,0,0,0,0);
5923
5924
    mpd_maxcontext(&maxcontext);
5925
    mpd_qset_u64(&bb, b, &maxcontext, status);
5926
    mpd_qmul(result, a, &bb, ctx, status);
5927
    mpd_del(&bb);
5928
}
5929
#endif
5930
5931
/* Like the minus operator. */
5932
void
5933
mpd_qminus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
5934
           uint32_t *status)
5935
{
5936
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qminus
inline
        
mpd_isspecial inlined into mpd_qminus 
mpd_qminus
5937
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qminus
inline
            
mpd_qcheck_nan will not be inlined into mpd_qminus 
mpd_qminus
5938
            return;
5939
        }
5940
    }
5941
5942
    if (mpd_iszero(a) && ctx->round != MPD_ROUND_FLOOR) {
inline
        
mpd_iszero should always be inlined (cost=always) 
mpd_qminus
inline
        
mpd_iszero inlined into mpd_qminus 
mpd_qminus
gvn
                              
load of type i32 not eliminated because it is clobbered by call 
mpd_qminus
5943
        mpd_qcopy_abs(result, a, status);
inline
        
mpd_qcopy_abs can be inlined into mpd_qminus with cost=230 (threshold=250) 
mpd_qminus
inline
        
mpd_qcopy_abs inlined into mpd_qminus 
mpd_qminus
5944
    }
5945
    else {
5946
        mpd_qcopy_negate(result, a, status);
inline
        
mpd_qcopy_negate can be inlined into mpd_qminus with cost=230 (threshold=250) 
mpd_qminus
inline
        
mpd_qcopy_negate inlined into mpd_qminus 
mpd_qminus
5947
    }
5948
5949
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qminus
inline
    
mpd_qfinalize will not be inlined into mpd_qminus 
mpd_qminus
5950
}
5951
5952
/* Like the plus operator. */
5953
void
5954
mpd_qplus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
5955
          uint32_t *status)
5956
{
5957
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qplus
inline
        
mpd_isspecial inlined into mpd_qplus 
mpd_qplus
5958
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qplus
inline
            
mpd_qcheck_nan will not be inlined into mpd_qplus 
mpd_qplus
5959
            return;
5960
        }
5961
    }
5962
5963
    if (mpd_iszero(a) && ctx->round != MPD_ROUND_FLOOR) {
inline
        
mpd_iszero should always be inlined (cost=always) 
mpd_qplus
inline
        
mpd_iszero inlined into mpd_qplus 
mpd_qplus
gvn
                              
load of type i32 not eliminated because it is clobbered by call 
mpd_qplus
5964
        mpd_qcopy_abs(result, a, status);
inline
        
mpd_qcopy_abs can be inlined into mpd_qplus with cost=230 (threshold=250) 
mpd_qplus
inline
        
mpd_qcopy_abs inlined into mpd_qplus 
mpd_qplus
5965
    }
5966
    else {
5967
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into mpd_qplus with cost=215 (threshold=250) 
mpd_qplus
inline
        
mpd_qcopy inlined into mpd_qplus 
mpd_qplus
5968
    }
5969
5970
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qplus
inline
    
mpd_qfinalize will not be inlined into mpd_qplus 
mpd_qplus
5971
}
5972
5973
/* The largest representable number that is smaller than the operand. */
5974
void
5975
mpd_qnext_minus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
5976
                uint32_t *status)
5977
{
5978
    mpd_context_t workctx;
5979
    MPD_NEW_CONST(tiny,MPD_POS,mpd_etiny(ctx)-1,1,1,1,1);
inline
    
mpd_etiny should always be inlined (cost=always) 
mpd_qnext_minus
inline
    
mpd_etiny inlined into mpd_qnext_minus 
mpd_qnext_minus
5980
5981
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qnext_minus
inline
        
mpd_isspecial inlined into mpd_qnext_minus 
mpd_qnext_minus
5982
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qnext_minus
inline
            
mpd_qcheck_nan will not be inlined into mpd_qnext_minus 
mpd_qnext_minus
5983
            return;
5984
        }
5985
5986
        assert(mpd_isinfinite(a));
5987
        if (mpd_isnegative(a)) {
inline
            
mpd_isnegative should always be inlined (cost=always) 
mpd_qnext_minus
inline
            
mpd_isnegative inlined into mpd_qnext_minus 
mpd_qnext_minus
5988
            mpd_qcopy(result, a, status);
inline
            
mpd_qcopy can be inlined into mpd_qnext_minus with cost=215 (threshold=250) 
mpd_qnext_minus
inline
            
mpd_qcopy inlined into mpd_qnext_minus 
mpd_qnext_minus
5989
            return;
5990
        }
5991
        else {
5992
            mpd_clear_flags(result);
inline
            
mpd_clear_flags should always be inlined (cost=always) 
mpd_qnext_minus
inline
            
mpd_clear_flags inlined into mpd_qnext_minus 
mpd_qnext_minus
5993
            mpd_qmaxcoeff(result, ctx, status);
inline
            
mpd_qmaxcoeff too costly to inline (cost=250, threshold=250) 
mpd_qnext_minus
inline
            
mpd_qmaxcoeff will not be inlined into mpd_qnext_minus 
mpd_qnext_minus
5994
            if (mpd_isnan(result)) {
inline
                
mpd_isnan should always be inlined (cost=always) 
mpd_qnext_minus
inline
                
mpd_isnan inlined into mpd_qnext_minus 
mpd_qnext_minus
5995
                return;
5996
            }
5997
            result->exp = mpd_etop(ctx);
inline
                          
mpd_etop should always be inlined (cost=always) 
mpd_qnext_minus
inline
                          
mpd_etop inlined into mpd_qnext_minus 
mpd_qnext_minus
5998
            return;
5999
        }
6000
    }
6001
6002
    mpd_workcontext(&workctx, ctx);
inline
    
mpd_workcontext can be inlined into mpd_qnext_minus with cost=-10 (threshold=487) 
mpd_qnext_minus
inline
    
mpd_workcontext inlined into mpd_qnext_minus 
mpd_qnext_minus
6003
    workctx.round = MPD_ROUND_FLOOR;
6004
6005
    if (!mpd_qcopy(result, a, status)) {
inline
         
mpd_qcopy can be inlined into mpd_qnext_minus with cost=215 (threshold=250) 
mpd_qnext_minus
inline
         
mpd_qcopy inlined into mpd_qnext_minus 
mpd_qnext_minus
6006
        return;
6007
    }
6008
6009
    mpd_qfinalize(result, &workctx, &workctx.status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qnext_minus
inline
    
mpd_qfinalize will not be inlined into mpd_qnext_minus 
mpd_qnext_minus
6010
    if (workctx.status&(MPD_Inexact|MPD_Errors)) {
gvn
                
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qnext_minus
6011
        *status |= (workctx.status&MPD_Errors);
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qnext_minus
6012
        return;
6013
    }
6014
6015
    workctx.status = 0;
6016
    mpd_qsub(result, a, &tiny, &workctx, &workctx.status);
inline
    
mpd_qsub too costly to inline (cost=670, threshold=625) 
mpd_qnext_minus
inline
    
mpd_qsub will not be inlined into mpd_qnext_minus 
mpd_qnext_minus
6017
    *status |= (workctx.status&MPD_Errors);
gvn
                        
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qnext_minus
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qnext_minus
6018
}
6019
6020
/* The smallest representable number that is larger than the operand. */
6021
void
6022
mpd_qnext_plus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
6023
               uint32_t *status)
6024
{
6025
    mpd_context_t workctx;
6026
    MPD_NEW_CONST(tiny,MPD_POS,mpd_etiny(ctx)-1,1,1,1,1);
inline
    
mpd_etiny should always be inlined (cost=always) 
mpd_qnext_plus
inline
    
mpd_etiny inlined into mpd_qnext_plus 
mpd_qnext_plus
6027
6028
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qnext_plus
inline
        
mpd_isspecial inlined into mpd_qnext_plus 
mpd_qnext_plus
6029
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qnext_plus
inline
            
mpd_qcheck_nan will not be inlined into mpd_qnext_plus 
mpd_qnext_plus
6030
            return;
6031
        }
6032
6033
        assert(mpd_isinfinite(a));
6034
        if (mpd_ispositive(a)) {
inline
            
mpd_ispositive should always be inlined (cost=always) 
mpd_qnext_plus
inline
            
mpd_ispositive inlined into mpd_qnext_plus 
mpd_qnext_plus
6035
            mpd_qcopy(result, a, status);
inline
            
mpd_qcopy can be inlined into mpd_qnext_plus with cost=215 (threshold=250) 
mpd_qnext_plus
inline
            
mpd_qcopy inlined into mpd_qnext_plus 
mpd_qnext_plus
6036
        }
6037
        else {
6038
            mpd_clear_flags(result);
inline
            
mpd_clear_flags should always be inlined (cost=always) 
mpd_qnext_plus
inline
            
mpd_clear_flags inlined into mpd_qnext_plus 
mpd_qnext_plus
6039
            mpd_qmaxcoeff(result, ctx, status);
inline
            
mpd_qmaxcoeff too costly to inline (cost=250, threshold=250) 
mpd_qnext_plus
inline
            
mpd_qmaxcoeff will not be inlined into mpd_qnext_plus 
mpd_qnext_plus
6040
            if (mpd_isnan(result)) {
inline
                
mpd_isnan should always be inlined (cost=always) 
mpd_qnext_plus
inline
                
mpd_isnan inlined into mpd_qnext_plus 
mpd_qnext_plus
6041
                return;
6042
            }
6043
            mpd_set_flags(result, MPD_NEG);
inline
            
mpd_set_flags should always be inlined (cost=always) 
mpd_qnext_plus
inline
            
mpd_set_flags inlined into mpd_qnext_plus 
mpd_qnext_plus
6044
            result->exp = mpd_etop(ctx);
inline
                          
mpd_etop should always be inlined (cost=always) 
mpd_qnext_plus
inline
                          
mpd_etop inlined into mpd_qnext_plus 
mpd_qnext_plus
6045
        }
6046
        return;
6047
    }
6048
6049
    mpd_workcontext(&workctx, ctx);
inline
    
mpd_workcontext can be inlined into mpd_qnext_plus with cost=-10 (threshold=487) 
mpd_qnext_plus
inline
    
mpd_workcontext inlined into mpd_qnext_plus 
mpd_qnext_plus
6050
    workctx.round = MPD_ROUND_CEILING;
6051
6052
    if (!mpd_qcopy(result, a, status)) {
inline
         
mpd_qcopy can be inlined into mpd_qnext_plus with cost=215 (threshold=250) 
mpd_qnext_plus
inline
         
mpd_qcopy inlined into mpd_qnext_plus 
mpd_qnext_plus
6053
        return;
6054
    }
6055
6056
    mpd_qfinalize(result, &workctx, &workctx.status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qnext_plus
inline
    
mpd_qfinalize will not be inlined into mpd_qnext_plus 
mpd_qnext_plus
6057
    if (workctx.status & (MPD_Inexact|MPD_Errors)) {
gvn
                
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qnext_plus
6058
        *status |= (workctx.status&MPD_Errors);
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qnext_plus
6059
        return;
6060
    }
6061
6062
    workctx.status = 0;
6063
    mpd_qadd(result, a, &tiny, &workctx, &workctx.status);
inline
    
mpd_qadd too costly to inline (cost=660, threshold=625) 
mpd_qnext_plus
inline
    
mpd_qadd will not be inlined into mpd_qnext_plus 
mpd_qnext_plus
6064
    *status |= (workctx.status&MPD_Errors);
gvn
                        
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qnext_plus
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qnext_plus
6065
}
6066
6067
/*
6068
 * The number closest to the first operand that is in the direction towards
6069
 * the second operand.
6070
 */
6071
void
6072
mpd_qnext_toward(mpd_t *result, const mpd_t *a, const mpd_t *b,
6073
                 const mpd_context_t *ctx, uint32_t *status)
6074
{
6075
    int c;
6076
6077
    if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
        
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qnext_toward
inline
        
mpd_qcheck_nans will not be inlined into mpd_qnext_toward 
mpd_qnext_toward
6078
        return;
6079
    }
6080
6081
    c = _mpd_cmp(a, b);
inline
        
_mpd_cmp too costly to inline (cost=550, threshold=250) 
mpd_qnext_toward
inline
        
_mpd_cmp will not be inlined into mpd_qnext_toward 
mpd_qnext_toward
6082
    if (c == 0) {
6083
        mpd_qcopy_sign(result, a, b, status);
inline
        
mpd_qcopy_sign can be inlined into mpd_qnext_toward with cost=240 (threshold=250) 
mpd_qnext_toward
inline
        
mpd_qcopy_sign inlined into mpd_qnext_toward 
mpd_qnext_toward
6084
        return;
6085
    }
6086
6087
    if (c < 0) {
6088
        mpd_qnext_plus(result, a, ctx, status);
inline
        
mpd_qnext_plus too costly to inline (cost=630, threshold=625) 
mpd_qnext_toward
inline
        
mpd_qnext_plus will not be inlined into mpd_qnext_toward 
mpd_qnext_toward
6089
    }
6090
    else {
6091
        mpd_qnext_minus(result, a, ctx, status);
inline
        
mpd_qnext_minus too costly to inline (cost=630, threshold=625) 
mpd_qnext_toward
inline
        
mpd_qnext_minus will not be inlined into mpd_qnext_toward 
mpd_qnext_toward
6092
    }
6093
6094
    if (mpd_isinfinite(result)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
mpd_qnext_toward
inline
        
mpd_isinfinite inlined into mpd_qnext_toward 
mpd_qnext_toward
6095
        *status |= (MPD_Overflow|MPD_Rounded|MPD_Inexact);
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qnext_toward
6096
    }
6097
    else if (mpd_adjexp(result) < ctx->emin) {
inline
             
mpd_adjexp should always be inlined (cost=always) 
mpd_qnext_toward
inline
             
mpd_adjexp inlined into mpd_qnext_toward 
mpd_qnext_toward
gvn
                                       
load of type i64 not eliminated because it is clobbered by call 
mpd_qnext_toward
6098
        *status |= (MPD_Underflow|MPD_Subnormal|MPD_Rounded|MPD_Inexact);
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qnext_toward
6099
        if (mpd_iszero(result)) {
inline
            
mpd_iszero should always be inlined (cost=always) 
mpd_qnext_toward
inline
            
mpd_iszero inlined into mpd_qnext_toward 
mpd_qnext_toward
6100
            *status |= MPD_Clamped;
gvn
                    
load of type i32 eliminated in favor of or 
mpd_qnext_toward
6101
        }
6102
    }
6103
}
6104
6105
/*
6106
 * Internal function: Integer power with mpd_uint_t exponent. The function
6107
 * can fail with MPD_Malloc_error.
6108
 *
6109
 * The error is equal to the error incurred in k-1 multiplications. Assuming
6110
 * the upper bound for the relative error in each operation:
6111
 *
6112
 *   abs(err) = 5 * 10**-prec
6113
 *   result = x**k * (1 + err)**(k-1)
6114
 */
6115
static inline void
6116
_mpd_qpow_uint(mpd_t *result, const mpd_t *base, mpd_uint_t exp,
6117
               uint8_t resultsign, const mpd_context_t *ctx, uint32_t *status)
6118
{
6119
    uint32_t workstatus = 0;
6120
    mpd_uint_t n;
6121
6122
    if (exp == 0) {
6123
        _settriple(result, resultsign, 1, 0); /* GCOV_NOT_REACHED */
inline
        
_settriple can be inlined into _mpd_qpow_uint with cost=180 (threshold=250) 
_mpd_qpow_uint
inline
        
_settriple inlined into _mpd_qpow_uint 
_mpd_qpow_uint
6124
        return; /* GCOV_NOT_REACHED */
6125
    }
6126
6127
    if (!mpd_qcopy(result, base, status)) {
inline
         
mpd_qcopy can be inlined into _mpd_qpow_uint with cost=215 (threshold=250) 
_mpd_qpow_uint
inline
         
mpd_qcopy inlined into _mpd_qpow_uint 
_mpd_qpow_uint
6128
        return;
6129
    }
6130
6131
    n = mpd_bits[mpd_bsr(exp)];
inline
                 
mpd_bsr can be inlined into _mpd_qpow_uint with cost=-15025 (threshold=487) 
_mpd_qpow_uint
inline
                 
mpd_bsr inlined into _mpd_qpow_uint 
_mpd_qpow_uint
6132
    while (n >>= 1) {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
_mpd_qpow_uint
loop-vectorize
    
loop not vectorized 
_mpd_qpow_uint
6133
        mpd_qmul(result, result, result, ctx, &workstatus);
inline
        
mpd_qmul can be inlined into _mpd_qpow_uint with cost=45 (threshold=375) 
_mpd_qpow_uint
inline
        
mpd_qmul inlined into _mpd_qpow_uint 
_mpd_qpow_uint
6134
        if (exp & n) {
6135
            mpd_qmul(result, result, base, ctx, &workstatus);
inline
            
mpd_qmul can be inlined into _mpd_qpow_uint with cost=45 (threshold=375) 
_mpd_qpow_uint
inline
            
mpd_qmul inlined into _mpd_qpow_uint 
_mpd_qpow_uint
6136
        }
6137
        if (mpd_isspecial(result) ||
inline
            
mpd_isspecial should always be inlined (cost=always) 
_mpd_qpow_uint
inline
            
mpd_isspecial inlined into _mpd_qpow_uint 
_mpd_qpow_uint
6138
            (mpd_iszerocoeff(result) && (workstatus & MPD_Clamped))) {
inline
             
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_qpow_uint
inline
             
mpd_iszerocoeff inlined into _mpd_qpow_uint 
_mpd_qpow_uint
licm
                                         
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_uint
gvn
                                         
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_uint
6139
            break;
6140
        }
6141
    }
6142
6143
    *status |= workstatus;
gvn
               
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_uint
gvn
               
load eliminated by PRE 
_mpd_qpow_uint
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qpow_uint
6144
    mpd_set_sign(result, resultsign);
inline
    
mpd_set_sign should always be inlined (cost=always) 
_mpd_qpow_uint
inline
    
mpd_set_sign inlined into _mpd_qpow_uint 
_mpd_qpow_uint
6145
}
6146
6147
/*
6148
 * Internal function: Integer power with mpd_t exponent, tbase and texp
6149
 * are modified!! Function can fail with MPD_Malloc_error.
6150
 *
6151
 * The error is equal to the error incurred in k multiplications. Assuming
6152
 * the upper bound for the relative error in each operation:
6153
 *
6154
 *   abs(err) = 5 * 10**-prec
6155
 *   result = x**k * (1 + err)**k
6156
 */
6157
static inline void
6158
_mpd_qpow_mpd(mpd_t *result, mpd_t *tbase, mpd_t *texp, uint8_t resultsign,
6159
              const mpd_context_t *ctx, uint32_t *status)
6160
{
6161
    uint32_t workstatus = 0;
6162
    mpd_context_t maxctx;
6163
    MPD_NEW_CONST(two,0,0,1,1,1,2);
6164
6165
6166
    mpd_maxcontext(&maxctx);
inline
    
mpd_maxcontext will not be inlined into _mpd_qpow_mpd because its definition is unavailable 
_mpd_qpow_mpd
6167
6168
    /* resize to smaller cannot fail */
6169
    mpd_qcopy(result, &one, status);
inline
    
mpd_qcopy can be inlined into _mpd_qpow_mpd with cost=215 (threshold=250) 
_mpd_qpow_mpd
inline
    
mpd_qcopy inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
6170
6171
    while (!mpd_iszero(texp)) {
inline
            
mpd_iszero should always be inlined (cost=always) 
_mpd_qpow_mpd
inline
            
mpd_iszero inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qpow
loop-vectorize
    
loop not vectorized 
mpd_qpow
6172
        if (mpd_isodd(texp)) {
inline
            
mpd_isodd can be inlined into _mpd_qpow_mpd with cost=95 (threshold=250) 
_mpd_qpow_mpd
inline
            
mpd_isodd inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
6173
            mpd_qmul(result, result, tbase, ctx, &workstatus);
inline
            
mpd_qmul can be inlined into _mpd_qpow_mpd with cost=45 (threshold=375) 
_mpd_qpow_mpd
inline
            
mpd_qmul inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
6174
            *status |= workstatus;
licm
                       
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_mpd
licm
                    
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_mpd
gvn
                       
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_mpd
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
_mpd_qpow_mpd
licm
                       
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_int
licm
                    
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_int
gvn
                       
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_int
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
_mpd_qpow_int
licm
                       
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpow
licm
                    
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qpow
gvn
                       
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qpow
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
mpd_qpow
6175
            if (mpd_isspecial(result) ||
inline
                
mpd_isspecial should always be inlined (cost=always) 
_mpd_qpow_mpd
inline
                
mpd_isspecial inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
6176
                (mpd_iszerocoeff(result) && (workstatus & MPD_Clamped))) {
inline
                 
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_qpow_mpd
inline
                 
mpd_iszerocoeff inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
licm
                                             
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qpow_mpd
gvn
                                             
load of type i32 eliminated in favor of load 
_mpd_qpow_mpd
6177
                break;
6178
            }
6179
        }
6180
        mpd_qmul(tbase, tbase, tbase, ctx, &workstatus);
inline
        
mpd_qmul can be inlined into _mpd_qpow_mpd with cost=45 (threshold=375) 
_mpd_qpow_mpd
inline
        
mpd_qmul inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
6181
        mpd_qdivint(texp, texp, &two, &maxctx, &workstatus);
inline
        
mpd_qdivint too costly to inline (cost=670, threshold=625) 
_mpd_qpow_mpd
inline
        
mpd_qdivint will not be inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
inline
        
mpd_qdivint too costly to inline (cost=670, threshold=625) 
_mpd_qpow_int
inline
        
mpd_qdivint will not be inlined into _mpd_qpow_int 
_mpd_qpow_int
inline
        
mpd_qdivint too costly to inline (cost=670, threshold=625) 
mpd_qpow
inline
        
mpd_qdivint will not be inlined into mpd_qpow 
mpd_qpow
6182
        if (mpd_isnan(tbase) || mpd_isnan(texp)) {
inline
                                
mpd_isnan should always be inlined (cost=always) 
_mpd_qpow_mpd
inline
                                
mpd_isnan inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
inline
            
mpd_isnan should always be inlined (cost=always) 
_mpd_qpow_mpd
inline
            
mpd_isnan inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
6183
            mpd_seterror(result, workstatus&MPD_Errors, status);
inline
            
mpd_seterror can be inlined into _mpd_qpow_mpd with cost=130 (threshold=250) 
_mpd_qpow_mpd
inline
            
mpd_seterror inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
gvn
                                 
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_mpd
gvn
                                 
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_int
gvn
                                 
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qpow
6184
            return;
6185
        }
6186
    }
6187
    mpd_set_sign(result, resultsign);
inline
    
mpd_set_sign should always be inlined (cost=always) 
_mpd_qpow_mpd
inline
    
mpd_set_sign inlined into _mpd_qpow_mpd 
_mpd_qpow_mpd
6188
}
6189
6190
/*
6191
 * The power function for integer exponents. Relative error _before_ the
6192
 * final rounding to prec:
6193
 *   abs(result - base**exp) < 0.1 * 10**-prec * abs(base**exp)
6194
 */
6195
static void
6196
_mpd_qpow_int(mpd_t *result, const mpd_t *base, const mpd_t *exp,
6197
              uint8_t resultsign,
6198
              const mpd_context_t *ctx, uint32_t *status)
6199
{
6200
    mpd_context_t workctx;
6201
    MPD_NEW_STATIC(tbase,0,0,0,0);
6202
    MPD_NEW_STATIC(texp,0,0,0,0);
6203
    mpd_ssize_t n;
6204
6205
6206
    mpd_workcontext(&workctx, ctx);
inline
    
mpd_workcontext can be inlined into _mpd_qpow_int with cost=-15010 (threshold=487) 
_mpd_qpow_int
inline
    
mpd_workcontext inlined into _mpd_qpow_int 
_mpd_qpow_int
6207
    workctx.prec += (exp->digits + exp->exp + 2);
gvn
                 
load of type i64 eliminated in favor of load 
_mpd_qpow_int
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qpow
gvn
                                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
6208
    workctx.round = MPD_ROUND_HALF_EVEN;
6209
    workctx.clamp = 0;
6210
    if (mpd_isnegative(exp)) {
inline
        
mpd_isnegative should always be inlined (cost=always) 
_mpd_qpow_int
inline
        
mpd_isnegative inlined into _mpd_qpow_int 
_mpd_qpow_int
6211
        workctx.prec += 1;
gvn
                     
load of type i64 eliminated in favor of add 
_mpd_qpow_int
6212
        mpd_qdiv(&tbase, &one, base, &workctx, status);
inline
        
mpd_qdiv can be inlined into _mpd_qpow_int with cost=5 (threshold=375) 
_mpd_qpow_int
inline
        
mpd_qdiv inlined into _mpd_qpow_int 
_mpd_qpow_int
6213
        if (*status&MPD_Errors) {
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qpow_int
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qpow
6214
            mpd_setspecial(result, MPD_POS, MPD_NAN);
inline
            
mpd_setspecial can be inlined into _mpd_qpow_int with cost=115 (threshold=250) 
_mpd_qpow_int
inline
            
mpd_setspecial inlined into _mpd_qpow_int 
_mpd_qpow_int
6215
            goto finish;
6216
        }
6217
    }
6218
    else {
6219
        if (!mpd_qcopy(&tbase, base, status)) {
inline
             
mpd_qcopy can be inlined into _mpd_qpow_int with cost=215 (threshold=250) 
_mpd_qpow_int
inline
             
mpd_qcopy inlined into _mpd_qpow_int 
_mpd_qpow_int
6220
            mpd_setspecial(result, MPD_POS, MPD_NAN);
inline
            
mpd_setspecial can be inlined into _mpd_qpow_int with cost=115 (threshold=250) 
_mpd_qpow_int
inline
            
mpd_setspecial inlined into _mpd_qpow_int 
_mpd_qpow_int
6221
            goto finish;
6222
        }
6223
    }
6224
6225
    n = mpd_qabs_uint(exp, &workctx.status);
inline
        
mpd_qabs_uint can be inlined into _mpd_qpow_int with cost=5 (threshold=375) 
_mpd_qpow_int
inline
        
mpd_qabs_uint inlined into _mpd_qpow_int 
_mpd_qpow_int
6226
    if (workctx.status&MPD_Invalid_operation) {
gvn
                
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qpow_int
gvn
                
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qpow
6227
        if (!mpd_qcopy(&texp, exp, status)) {
inline
             
mpd_qcopy can be inlined into _mpd_qpow_int with cost=215 (threshold=250) 
_mpd_qpow_int
inline
             
mpd_qcopy inlined into _mpd_qpow_int 
_mpd_qpow_int
6228
            mpd_setspecial(result, MPD_POS, MPD_NAN); /* GCOV_UNLIKELY */
inline
            
mpd_setspecial can be inlined into _mpd_qpow_int with cost=115 (threshold=250) 
_mpd_qpow_int
inline
            
mpd_setspecial inlined into _mpd_qpow_int 
_mpd_qpow_int
6229
            goto finish; /* GCOV_UNLIKELY */
6230
        }
6231
        _mpd_qpow_mpd(result, &tbase, &texp, resultsign, &workctx, status);
inline
        
_mpd_qpow_mpd can be inlined into _mpd_qpow_int with cost=-13975 (threshold=325) 
_mpd_qpow_int
inline
        
_mpd_qpow_mpd inlined into _mpd_qpow_int 
_mpd_qpow_int
6232
    }
6233
    else {
6234
        _mpd_qpow_uint(result, &tbase, n, resultsign, &workctx, status);
inline
        
_mpd_qpow_uint too costly to inline (cost=840, threshold=812) 
_mpd_qpow_int
inline
        
_mpd_qpow_uint will not be inlined into _mpd_qpow_int 
_mpd_qpow_int
inline
        
_mpd_qpow_uint too costly to inline (cost=840, threshold=812) 
mpd_qpow
inline
        
_mpd_qpow_uint will not be inlined into mpd_qpow 
mpd_qpow
6235
    }
6236
6237
    if (mpd_isinfinite(result)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
_mpd_qpow_int
inline
        
mpd_isinfinite inlined into _mpd_qpow_int 
_mpd_qpow_int
6238
        /* for ROUND_DOWN, ROUND_FLOOR, etc. */
6239
        _settriple(result, resultsign, 1, MPD_EXP_INF);
inline
        
_settriple can be inlined into _mpd_qpow_int with cost=180 (threshold=250) 
_mpd_qpow_int
inline
        
_settriple inlined into _mpd_qpow_int 
_mpd_qpow_int
6240
    }
6241
6242
finish:
6243
    mpd_del(&tbase);
inline
    
mpd_del should always be inlined (cost=always) 
_mpd_qpow_int
inline
    
mpd_del inlined into _mpd_qpow_int 
_mpd_qpow_int
6244
    mpd_del(&texp);
inline
    
mpd_del should always be inlined (cost=always) 
_mpd_qpow_int
inline
    
mpd_del inlined into _mpd_qpow_int 
_mpd_qpow_int
6245
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
_mpd_qpow_int
inline
    
mpd_qfinalize will not be inlined into _mpd_qpow_int 
_mpd_qpow_int
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qpow
inline
    
mpd_qfinalize will not be inlined into mpd_qpow 
mpd_qpow
6246
}
6247
6248
/*
6249
 * If the exponent is infinite and base equals one, the result is one
6250
 * with a coefficient of length prec. Otherwise, result is undefined.
6251
 * Return the value of the comparison against one.
6252
 */
6253
static int
6254
_qcheck_pow_one_inf(mpd_t *result, const mpd_t *base, uint8_t resultsign,
6255
                    const mpd_context_t *ctx, uint32_t *status)
6256
{
6257
    mpd_ssize_t shift;
6258
    int cmp;
6259
6260
    if ((cmp = _mpd_cmp(base, &one)) == 0) {
inline
               
_mpd_cmp too costly to inline (cost=550, threshold=250) 
_qcheck_pow_one_inf
inline
               
_mpd_cmp will not be inlined into _qcheck_pow_one_inf 
_qcheck_pow_one_inf
inline
               
_mpd_cmp too costly to inline (cost=550, threshold=250) 
mpd_qpow
inline
               
_mpd_cmp will not be inlined into mpd_qpow 
mpd_qpow
6261
        shift = ctx->prec-1;
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_one_inf
gvn
                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
6262
        mpd_qshiftl(result, &one, shift, status);
inline
        
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
_qcheck_pow_one_inf
inline
        
mpd_qshiftl will not be inlined into _qcheck_pow_one_inf 
_qcheck_pow_one_inf
inline
        
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
mpd_qpow
inline
        
mpd_qshiftl will not be inlined into mpd_qpow 
mpd_qpow
6263
        result->exp = -shift;
6264
        mpd_set_flags(result, resultsign);
inline
        
mpd_set_flags should always be inlined (cost=always) 
_qcheck_pow_one_inf
inline
        
mpd_set_flags inlined into _qcheck_pow_one_inf 
_qcheck_pow_one_inf
6265
        *status |= (MPD_Inexact|MPD_Rounded);
gvn
                
load of type i32 not eliminated because it is clobbered by call 
_qcheck_pow_one_inf
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qpow
6266
    }
6267
6268
    return cmp;
6269
}
6270
6271
/*
6272
 * If abs(base) equals one, calculate the correct power of one result.
6273
 * Otherwise, result is undefined. Return the value of the comparison
6274
 * against 1.
6275
 *
6276
 * This is an internal function that does not check for specials.
6277
 */
6278
static int
6279
_qcheck_pow_one(mpd_t *result, const mpd_t *base, const mpd_t *exp,
6280
                uint8_t resultsign,
6281
                const mpd_context_t *ctx, uint32_t *status)
6282
{
6283
    uint32_t workstatus = 0;
6284
    mpd_ssize_t shift;
6285
    int cmp;
6286
6287
    if ((cmp = _mpd_cmp_abs(base, &one)) == 0) {
inline
               
_mpd_cmp_abs too costly to inline (cost=360, threshold=250) 
_qcheck_pow_one
inline
               
_mpd_cmp_abs will not be inlined into _qcheck_pow_one 
_qcheck_pow_one
inline
               
_mpd_cmp_abs too costly to inline (cost=360, threshold=250) 
mpd_qpow
inline
               
_mpd_cmp_abs will not be inlined into mpd_qpow 
mpd_qpow
6288
        if (_mpd_isint(exp)) {
inline
            
_mpd_isint can be inlined into _qcheck_pow_one with cost=110 (threshold=250) 
_qcheck_pow_one
inline
            
_mpd_isint inlined into _qcheck_pow_one 
_qcheck_pow_one
6289
            if (mpd_isnegative(exp)) {
inline
                
mpd_isnegative should always be inlined (cost=always) 
_qcheck_pow_one
inline
                
mpd_isnegative inlined into _qcheck_pow_one 
_qcheck_pow_one
6290
                _settriple(result, resultsign, 1, 0);
inline
                
_settriple can be inlined into _qcheck_pow_one with cost=180 (threshold=250) 
_qcheck_pow_one
inline
                
_settriple inlined into _qcheck_pow_one 
_qcheck_pow_one
6291
                return 0;
6292
            }
6293
            /* 1.000**3 = 1.000000000 */
6294
            mpd_qmul_ssize(result, exp, -base->exp, ctx, &workstatus);
inline
            
mpd_qmul_ssize too costly to inline (cost=300, threshold=250) 
_qcheck_pow_one
inline
            
mpd_qmul_ssize will not be inlined into _qcheck_pow_one 
_qcheck_pow_one
gvn
                                               
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_one
inline
            
mpd_qmul_ssize too costly to inline (cost=300, threshold=250) 
mpd_qpow
inline
            
mpd_qmul_ssize will not be inlined into mpd_qpow 
mpd_qpow
gvn
                                               
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
6295
            if (workstatus&MPD_Errors) {
gvn
                
load of type i32 not eliminated in favor of store because it is clobbered by call 
_qcheck_pow_one
gvn
                
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qpow
6296
                *status |= (workstatus&MPD_Errors);
gvn
                        
load of type i32 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                        
load of type i32 not eliminated because it is clobbered by call 
mpd_qpow
6297
                return 0;
6298
            }
6299
            /* digits-1 after exponentiation */
6300
            shift = mpd_qget_ssize(result, &workstatus);
inline
                    
mpd_qget_ssize can be inlined into _qcheck_pow_one with cost=85 (threshold=250) 
_qcheck_pow_one
inline
                    
mpd_qget_ssize inlined into _qcheck_pow_one 
_qcheck_pow_one
6301
            /* shift is MPD_SSIZE_MAX if result is too large */
6302
            if (shift > ctx->prec-1) {
gvn
                             
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                             
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
6303
                shift = ctx->prec-1;
6304
                *status |= MPD_Rounded;
6305
            }
6306
        }
6307
        else if (mpd_ispositive(base)) {
inline
                 
mpd_ispositive should always be inlined (cost=always) 
_qcheck_pow_one
inline
                 
mpd_ispositive inlined into _qcheck_pow_one 
_qcheck_pow_one
6308
            shift = ctx->prec-1;
gvn
                         
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_one
gvn
                         
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
6309
            *status |= (MPD_Inexact|MPD_Rounded);
6310
        }
6311
        else {
6312
            return -2; /* GCOV_NOT_REACHED */
6313
        }
6314
        if (!mpd_qshiftl(result, &one, shift, status)) {
inline
             
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
_qcheck_pow_one
inline
             
mpd_qshiftl will not be inlined into _qcheck_pow_one 
_qcheck_pow_one
inline
             
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
mpd_qpow
inline
             
mpd_qshiftl will not be inlined into mpd_qpow 
mpd_qpow
6315
            return 0;
6316
        }
6317
        result->exp = -shift;
6318
        mpd_set_flags(result, resultsign);
inline
        
mpd_set_flags should always be inlined (cost=always) 
_qcheck_pow_one
inline
        
mpd_set_flags inlined into _qcheck_pow_one 
_qcheck_pow_one
6319
    }
6320
6321
    return cmp;
6322
}
6323
6324
/*
6325
 * Detect certain over/underflow of x**y.
6326
 * ACL2 proof: pow-bounds.lisp.
6327
 *
6328
 *   Symbols:
6329
 *
6330
 *     e: EXP_INF or EXP_CLAMP
6331
 *     x: base
6332
 *     y: exponent
6333
 *
6334
 *     omega(e) = log10(abs(e))
6335
 *     zeta(x)  = log10(abs(log10(x)))
6336
 *     theta(y) = log10(abs(y))
6337
 *
6338
 *   Upper and lower bounds:
6339
 *
6340
 *     ub_omega(e) = ceil(log10(abs(e)))
6341
 *     lb_theta(y) = floor(log10(abs(y)))
6342
 *
6343
 *                  | floor(log10(floor(abs(log10(x))))) if x < 1/10 or x >= 10
6344
 *     lb_zeta(x) = | floor(log10(abs(x-1)/10)) if 1/10 <= x < 1
6345
 *                  | floor(log10(abs((x-1)/100))) if 1 < x < 10
6346
 *
6347
 *   ub_omega(e) and lb_theta(y) are obviously upper and lower bounds
6348
 *   for omega(e) and theta(y).
6349
 *
6350
 *   lb_zeta is a lower bound for zeta(x):
6351
 *
6352
 *     x < 1/10 or x >= 10:
6353
 *
6354
 *       abs(log10(x)) >= 1, so the outer log10 is well defined. Since log10
6355
 *       is strictly increasing, the end result is a lower bound.
6356
 *
6357
 *     1/10 <= x < 1:
6358
 *
6359
 *       We use: log10(x) <= (x-1)/log(10)
6360
 *               abs(log10(x)) >= abs(x-1)/log(10)
6361
 *               abs(log10(x)) >= abs(x-1)/10
6362
 *
6363
 *     1 < x < 10:
6364
 *
6365
 *       We use: (x-1)/(x*log(10)) < log10(x)
6366
 *               abs((x-1)/100) < abs(log10(x))
6367
 *
6368
 *       XXX: abs((x-1)/10) would work, need ACL2 proof.
6369
 *
6370
 *
6371
 *   Let (0 < x < 1 and y < 0) or (x > 1 and y > 0).                  (H1)
6372
 *   Let ub_omega(exp_inf) < lb_zeta(x) + lb_theta(y)                 (H2)
6373
 *
6374
 *   Then:
6375
 *       log10(abs(exp_inf)) < log10(abs(log10(x))) + log10(abs(y)).   (1)
6376
 *                   exp_inf < log10(x) * y                            (2)
6377
 *               10**exp_inf < x**y                                    (3)
6378
 *
6379
 *   Let (0 < x < 1 and y > 0) or (x > 1 and y < 0).                  (H3)
6380
 *   Let ub_omega(exp_clamp) < lb_zeta(x) + lb_theta(y)               (H4)
6381
 *
6382
 *   Then:
6383
 *     log10(abs(exp_clamp)) < log10(abs(log10(x))) + log10(abs(y)).   (4)
6384
 *              log10(x) * y < exp_clamp                               (5)
6385
 *                      x**y < 10**exp_clamp                           (6)
6386
 *
6387
 */
6388
static mpd_ssize_t
6389
_lower_bound_zeta(const mpd_t *x, uint32_t *status)
6390
{
6391
    mpd_context_t maxctx;
6392
    MPD_NEW_STATIC(scratch,0,0,0,0);
6393
    mpd_ssize_t t, u;
6394
6395
    t = mpd_adjexp(x);
inline
        
mpd_adjexp should always be inlined (cost=always) 
_lower_bound_zeta
inline
        
mpd_adjexp inlined into _lower_bound_zeta 
_lower_bound_zeta
6396
    if (t > 0) {
6397
        /* x >= 10 -> floor(log10(floor(abs(log10(x))))) */
6398
        return mpd_exp_digits(t) - 1;
inline
               
mpd_exp_digits can be inlined into _lower_bound_zeta with cost=265 (threshold=325) 
_lower_bound_zeta
inline
               
mpd_exp_digits inlined into _lower_bound_zeta 
_lower_bound_zeta
6399
    }
6400
    else if (t < -1) {
6401
        /* x < 1/10 -> floor(log10(floor(abs(log10(x))))) */
6402
        return mpd_exp_digits(t+1) - 1;
inline
               
mpd_exp_digits can be inlined into _lower_bound_zeta with cost=265 (threshold=325) 
_lower_bound_zeta
inline
               
mpd_exp_digits inlined into _lower_bound_zeta 
_lower_bound_zeta
6403
    }
6404
    else {
6405
        mpd_maxcontext(&maxctx);
inline
        
mpd_maxcontext will not be inlined into _lower_bound_zeta because its definition is unavailable 
_lower_bound_zeta
6406
        mpd_qsub(&scratch, x, &one, &maxctx, status);
inline
        
mpd_qsub too costly to inline (cost=670, threshold=625) 
_lower_bound_zeta
inline
        
mpd_qsub will not be inlined into _lower_bound_zeta 
_lower_bound_zeta
inline
        
mpd_qsub too costly to inline (cost=670, threshold=625) 
_qcheck_pow_bounds
inline
        
mpd_qsub will not be inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
inline
        
mpd_qsub too costly to inline (cost=670, threshold=625) 
mpd_qpow
inline
        
mpd_qsub will not be inlined into mpd_qpow 
mpd_qpow
6407
        if (mpd_isspecial(&scratch)) {
inline
            
mpd_isspecial should always be inlined (cost=always) 
_lower_bound_zeta
inline
            
mpd_isspecial inlined into _lower_bound_zeta 
_lower_bound_zeta
6408
            mpd_del(&scratch);
inline
            
mpd_del should always be inlined (cost=always) 
_lower_bound_zeta
inline
            
mpd_del inlined into _lower_bound_zeta 
_lower_bound_zeta
6409
            return MPD_SSIZE_MAX;
6410
        }
6411
        u = mpd_adjexp(&scratch);
inline
            
mpd_adjexp should always be inlined (cost=always) 
_lower_bound_zeta
inline
            
mpd_adjexp inlined into _lower_bound_zeta 
_lower_bound_zeta
6412
        mpd_del(&scratch);
inline
        
mpd_del should always be inlined (cost=always) 
_lower_bound_zeta
inline
        
mpd_del inlined into _lower_bound_zeta 
_lower_bound_zeta
6413
6414
        /* t == -1, 1/10 <= x < 1 -> floor(log10(abs(x-1)/10))
6415
         * t == 0,  1 < x < 10    -> floor(log10(abs(x-1)/100)) */
6416
        return (t == 0) ? u-2 : u-1;
6417
    }
6418
}
6419
6420
/*
6421
 * Detect cases of certain overflow/underflow in the power function.
6422
 * Assumptions: x != 1, y != 0. The proof above is for positive x.
6423
 * If x is negative and y is an odd integer, x**y == -(abs(x)**y),
6424
 * so the analysis does not change.
6425
 */
6426
static int
6427
_qcheck_pow_bounds(mpd_t *result, const mpd_t *x, const mpd_t *y,
6428
                   uint8_t resultsign,
6429
                   const mpd_context_t *ctx, uint32_t *status)
6430
{
6431
    MPD_NEW_SHARED(abs_x, x);
gvn
    
load of type i8 not eliminated because it is clobbered by call 
mpd_qpow
gvn
    
load eliminated by PRE 
mpd_qpow
6432
    mpd_ssize_t ub_omega, lb_zeta, lb_theta;
6433
    uint8_t sign;
6434
6435
    mpd_set_positive(&abs_x);
inline
    
mpd_set_positive should always be inlined (cost=always) 
_qcheck_pow_bounds
inline
    
mpd_set_positive inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
6436
6437
    lb_theta = mpd_adjexp(y);
inline
               
mpd_adjexp should always be inlined (cost=always) 
_qcheck_pow_bounds
inline
               
mpd_adjexp inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
6438
    lb_zeta = _lower_bound_zeta(&abs_x, status);
inline
              
_lower_bound_zeta can be inlined into _qcheck_pow_bounds with cost=-14095 (threshold=250) 
_qcheck_pow_bounds
inline
              
_lower_bound_zeta inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
6439
    if (lb_zeta == MPD_SSIZE_MAX) {
6440
        mpd_seterror(result, MPD_Malloc_error, status);
inline
        
mpd_seterror can be inlined into _qcheck_pow_bounds with cost=130 (threshold=250) 
_qcheck_pow_bounds
inline
        
mpd_seterror inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
6441
        return 1;
6442
    }
6443
6444
    sign = (mpd_adjexp(&abs_x) < 0) ^ mpd_sign(y);
inline
            
mpd_adjexp should always be inlined (cost=always) 
_qcheck_pow_bounds
inline
            
mpd_adjexp inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
inline
                                      
mpd_sign should always be inlined (cost=always) 
_qcheck_pow_bounds
inline
                                      
mpd_sign inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
6445
    if (sign == 0) {
6446
        /* (0 < |x| < 1 and y < 0) or (|x| > 1 and y > 0) */
6447
        ub_omega = mpd_exp_digits(ctx->emax);
inline
                   
mpd_exp_digits can be inlined into _qcheck_pow_bounds with cost=265 (threshold=325) 
_qcheck_pow_bounds
inline
                   
mpd_exp_digits inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
gvn
                                       
load of type i64 not eliminated because it is clobbered by call 
_qcheck_pow_bounds
gvn
                                       
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
6448
        if (ub_omega < lb_zeta + lb_theta) {
6449
            _settriple(result, resultsign, 1, MPD_EXP_INF);
inline
            
_settriple can be inlined into _qcheck_pow_bounds with cost=180 (threshold=250) 
_qcheck_pow_bounds
inline
            
_settriple inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
6450
            mpd_qfinalize(result, ctx, status);
inline
            
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
_qcheck_pow_bounds
inline
            
mpd_qfinalize will not be inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
inline
            
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qpow
inline
            
mpd_qfinalize will not be inlined into mpd_qpow 
mpd_qpow
6451
            return 1;
6452
        }
6453
    }
6454
    else {
6455
        /* (0 < |x| < 1 and y > 0) or (|x| > 1 and y < 0). */
6456
        ub_omega = mpd_exp_digits(mpd_etiny(ctx));
inline
                                  
mpd_etiny should always be inlined (cost=always) 
_qcheck_pow_bounds
inline
                                  
mpd_etiny inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
inline
                   
mpd_exp_digits can be inlined into _qcheck_pow_bounds with cost=265 (threshold=325) 
_qcheck_pow_bounds
inline
                   
mpd_exp_digits inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
6457
        if (ub_omega < lb_zeta + lb_theta) {
6458
            _settriple(result, resultsign, 1, mpd_etiny(ctx)-1);
inline
            
_settriple can be inlined into _qcheck_pow_bounds with cost=180 (threshold=250) 
_qcheck_pow_bounds
inline
            
_settriple inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
inline
                                              
mpd_etiny should always be inlined (cost=always) 
_qcheck_pow_bounds
inline
                                              
mpd_etiny inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
6459
            mpd_qfinalize(result, ctx, status);
inline
            
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
_qcheck_pow_bounds
inline
            
mpd_qfinalize will not be inlined into _qcheck_pow_bounds 
_qcheck_pow_bounds
inline
            
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qpow
inline
            
mpd_qfinalize will not be inlined into mpd_qpow 
mpd_qpow
6460
            return 1;
6461
        }
6462
    }
6463
6464
    return 0;
6465
}
6466
6467
/*
6468
 * TODO: Implement algorithm for computing exact powers from decimal.py.
6469
 * In order to prevent infinite loops, this has to be called before
6470
 * using Ziv's strategy for correct rounding.
6471
 */
6472
/*
6473
static int
6474
_mpd_qpow_exact(mpd_t *result, const mpd_t *base, const mpd_t *exp,
6475
                const mpd_context_t *ctx, uint32_t *status)
6476
{
6477
    return 0;
6478
}
6479
*/
6480
6481
/*
6482
 * The power function for real exponents.
6483
 *   Relative error: abs(result - e**y) < e**y * 1/5 * 10**(-prec - 1)
6484
 */
6485
static void
6486
_mpd_qpow_real(mpd_t *result, const mpd_t *base, const mpd_t *exp,
6487
               const mpd_context_t *ctx, uint32_t *status)
6488
{
6489
    mpd_context_t workctx;
6490
    MPD_NEW_STATIC(texp,0,0,0,0);
6491
6492
    if (!mpd_qcopy(&texp, exp, status)) {
inline
         
mpd_qcopy can be inlined into _mpd_qpow_real with cost=215 (threshold=250) 
_mpd_qpow_real
inline
         
mpd_qcopy inlined into _mpd_qpow_real 
_mpd_qpow_real
6493
        mpd_seterror(result, MPD_Malloc_error, status);
inline
        
mpd_seterror can be inlined into _mpd_qpow_real with cost=130 (threshold=250) 
_mpd_qpow_real
inline
        
mpd_seterror inlined into _mpd_qpow_real 
_mpd_qpow_real
6494
        return;
6495
    }
6496
6497
    mpd_maxcontext(&workctx);
inline
    
mpd_maxcontext will not be inlined into _mpd_qpow_real because its definition is unavailable 
_mpd_qpow_real
6498
    workctx.prec = (base->digits > ctx->prec) ? base->digits : ctx->prec;
gvn
                          
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                                        
load of type i64 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                          
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qpow
gvn
                                        
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
6499
    workctx.prec += (4 + MPD_EXPDIGITS);
6500
    workctx.round = MPD_ROUND_HALF_EVEN;
6501
    workctx.allcr = ctx->allcr;
gvn
                         
load of type i32 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                         
load of type i32 not eliminated because it is clobbered by call 
mpd_qpow
6502
6503
    /*
6504
     * extra := MPD_EXPDIGITS = MPD_EXP_MAX_T
6505
     * wp := prec + 4 + extra
6506
     * abs(err) < 5 * 10**-wp
6507
     * y := log(base) * exp
6508
     * Calculate:
6509
     *   1)   e**(y * (1 + err)**2) * (1 + err)
6510
     *      = e**y * e**(y * (2*err + err**2)) * (1 + err)
6511
     *        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
6512
     * Relative error of the underlined term:
6513
     *   2) abs(e**(y * (2*err + err**2)) - 1)
6514
     * Case abs(y) >= 10**extra:
6515
     *   3) adjexp(y)+1 > log10(abs(y)) >= extra
6516
     *   This triggers the Overflow/Underflow shortcut in _mpd_qexp(),
6517
     *   so no further analysis is necessary.
6518
     * Case abs(y) < 10**extra:
6519
     *   4) abs(y * (2*err + err**2)) < 1/5 * 10**(-prec - 2)
6520
     *   Use (see _mpd_qexp):
6521
     *     5) abs(x) <= 9/10 * 10**-p ==> abs(e**x - 1) < 10**-p
6522
     *   With 2), 4) and 5):
6523
     *     6) abs(e**(y * (2*err + err**2)) - 1) < 10**(-prec - 2)
6524
     *   The complete relative error of 1) is:
6525
     *     7) abs(result - e**y) < e**y * 1/5 * 10**(-prec - 1)
6526
     */
6527
    mpd_qln(result, base, &workctx, &workctx.status);
inline
    
mpd_qln too costly to inline (cost=630, threshold=625) 
_mpd_qpow_real
inline
    
mpd_qln will not be inlined into _mpd_qpow_real 
_mpd_qpow_real
inline
    
mpd_qln too costly to inline (cost=630, threshold=625) 
mpd_qpow
inline
    
mpd_qln will not be inlined into mpd_qpow 
mpd_qpow
6528
    mpd_qmul(result, result, &texp, &workctx, &workctx.status);
inline
    
mpd_qmul can be inlined into _mpd_qpow_real with cost=45 (threshold=375) 
_mpd_qpow_real
inline
    
mpd_qmul inlined into _mpd_qpow_real 
_mpd_qpow_real
6529
    mpd_qexp(result, result, &workctx, status);
inline
    
mpd_qexp too costly to inline (cost=630, threshold=625) 
_mpd_qpow_real
inline
    
mpd_qexp will not be inlined into _mpd_qpow_real 
_mpd_qpow_real
inline
    
mpd_qexp too costly to inline (cost=630, threshold=625) 
mpd_qpow
inline
    
mpd_qexp will not be inlined into mpd_qpow 
mpd_qpow
6530
6531
    mpd_del(&texp);
inline
    
mpd_del should always be inlined (cost=always) 
_mpd_qpow_real
inline
    
mpd_del inlined into _mpd_qpow_real 
_mpd_qpow_real
6532
    *status |= (workctx.status&MPD_Errors);
gvn
                        
load of type i32 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qpow_real
gvn
                        
load of type i32 not eliminated because it is clobbered by call 
mpd_qpow
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qpow
6533
    *status |= (MPD_Inexact|MPD_Rounded);
6534
}
6535
6536
/* The power function: base**exp */
6537
void
6538
mpd_qpow(mpd_t *result, const mpd_t *base, const mpd_t *exp,
6539
         const mpd_context_t *ctx, uint32_t *status)
6540
{
6541
    uint8_t resultsign = 0;
6542
    int intexp = 0;
6543
    int cmp;
6544
6545
    if (mpd_isspecial(base) || mpd_isspecial(exp)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qpow
inline
        
mpd_isspecial inlined into mpd_qpow 
mpd_qpow
inline
                               
mpd_isspecial should always be inlined (cost=always) 
mpd_qpow
inline
                               
mpd_isspecial inlined into mpd_qpow 
mpd_qpow
6546
        if (mpd_qcheck_nans(result, base, exp, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qpow
inline
            
mpd_qcheck_nans will not be inlined into mpd_qpow 
mpd_qpow
6547
            return;
6548
        }
6549
    }
6550
    if (mpd_isinteger(exp)) {
inline
        
mpd_isinteger can be inlined into mpd_qpow with cost=130 (threshold=250) 
mpd_qpow
inline
        
mpd_isinteger inlined into mpd_qpow 
mpd_qpow
6551
        intexp = 1;
6552
        resultsign = mpd_isnegative(base) && mpd_isodd(exp);
inline
                     
mpd_isnegative should always be inlined (cost=always) 
mpd_qpow
inline
                     
mpd_isnegative inlined into mpd_qpow 
mpd_qpow
inline
                                             
mpd_isodd can be inlined into mpd_qpow with cost=95 (threshold=250) 
mpd_qpow
inline
                                             
mpd_isodd inlined into mpd_qpow 
mpd_qpow
6553
    }
6554
6555
    if (mpd_iszero(base)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
mpd_qpow
inline
        
mpd_iszero inlined into mpd_qpow 
mpd_qpow
6556
        if (mpd_iszero(exp)) {
inline
            
mpd_iszero should always be inlined (cost=always) 
mpd_qpow
inline
            
mpd_iszero inlined into mpd_qpow 
mpd_qpow
6557
            mpd_seterror(result, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into mpd_qpow with cost=130 (threshold=250) 
mpd_qpow
inline
            
mpd_seterror inlined into mpd_qpow 
mpd_qpow
6558
        }
6559
        else if (mpd_isnegative(exp)) {
inline
                 
mpd_isnegative should always be inlined (cost=always) 
mpd_qpow
inline
                 
mpd_isnegative inlined into mpd_qpow 
mpd_qpow
6560
            mpd_setspecial(result, resultsign, MPD_INF);
inline
            
mpd_setspecial can be inlined into mpd_qpow with cost=120 (threshold=250) 
mpd_qpow
inline
            
mpd_setspecial inlined into mpd_qpow 
mpd_qpow
6561
        }
6562
        else {
6563
            _settriple(result, resultsign, 0, 0);
inline
            
_settriple can be inlined into mpd_qpow with cost=180 (threshold=250) 
mpd_qpow
inline
            
_settriple inlined into mpd_qpow 
mpd_qpow
6564
        }
6565
        return;
6566
    }
6567
    if (mpd_isnegative(base)) {
inline
        
mpd_isnegative should always be inlined (cost=always) 
mpd_qpow
inline
        
mpd_isnegative inlined into mpd_qpow 
mpd_qpow
6568
        if (!intexp || mpd_isinfinite(exp)) {
inline
                       
mpd_isinfinite should always be inlined (cost=always) 
mpd_qpow
inline
                       
mpd_isinfinite inlined into mpd_qpow 
mpd_qpow
6569
            mpd_seterror(result, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into mpd_qpow with cost=130 (threshold=250) 
mpd_qpow
inline
            
mpd_seterror inlined into mpd_qpow 
mpd_qpow
6570
            return;
6571
        }
6572
    }
6573
    if (mpd_isinfinite(exp)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
mpd_qpow
inline
        
mpd_isinfinite inlined into mpd_qpow 
mpd_qpow
6574
        /* power of one */
6575
        cmp = _qcheck_pow_one_inf(result, base, resultsign, ctx, status);
inline
              
_qcheck_pow_one_inf can be inlined into mpd_qpow with cost=-14900 (threshold=250) 
mpd_qpow
inline
              
_qcheck_pow_one_inf inlined into mpd_qpow 
mpd_qpow
6576
        if (cmp == 0) {
6577
            return;
6578
        }
6579
        else {
6580
            cmp *= mpd_arith_sign(exp);
inline
                   
mpd_arith_sign should always be inlined (cost=always) 
mpd_qpow
inline
                   
mpd_arith_sign inlined into mpd_qpow 
mpd_qpow
6581
            if (cmp < 0) {
6582
                _settriple(result, resultsign, 0, 0);
inline
                
_settriple can be inlined into mpd_qpow with cost=180 (threshold=250) 
mpd_qpow
inline
                
_settriple inlined into mpd_qpow 
mpd_qpow
6583
            }
6584
            else {
6585
                mpd_setspecial(result, resultsign, MPD_INF);
inline
                
mpd_setspecial can be inlined into mpd_qpow with cost=120 (threshold=250) 
mpd_qpow
inline
                
mpd_setspecial inlined into mpd_qpow 
mpd_qpow
6586
            }
6587
        }
6588
        return;
6589
    }
6590
    if (mpd_isinfinite(base)) {
inline
        
mpd_isinfinite should always be inlined (cost=always) 
mpd_qpow
inline
        
mpd_isinfinite inlined into mpd_qpow 
mpd_qpow
6591
        if (mpd_iszero(exp)) {
inline
            
mpd_iszero should always be inlined (cost=always) 
mpd_qpow
inline
            
mpd_iszero inlined into mpd_qpow 
mpd_qpow
6592
            _settriple(result, resultsign, 1, 0);
inline
            
_settriple can be inlined into mpd_qpow with cost=180 (threshold=250) 
mpd_qpow
inline
            
_settriple inlined into mpd_qpow 
mpd_qpow
6593
        }
6594
        else if (mpd_isnegative(exp)) {
inline
                 
mpd_isnegative should always be inlined (cost=always) 
mpd_qpow
inline
                 
mpd_isnegative inlined into mpd_qpow 
mpd_qpow
6595
            _settriple(result, resultsign, 0, 0);
inline
            
_settriple can be inlined into mpd_qpow with cost=180 (threshold=250) 
mpd_qpow
inline
            
_settriple inlined into mpd_qpow 
mpd_qpow
6596
        }
6597
        else {
6598
            mpd_setspecial(result, resultsign, MPD_INF);
inline
            
mpd_setspecial can be inlined into mpd_qpow with cost=120 (threshold=250) 
mpd_qpow
inline
            
mpd_setspecial inlined into mpd_qpow 
mpd_qpow
6599
        }
6600
        return;
6601
    }
6602
    if (mpd_iszero(exp)) {
6603
        _settriple(result, resultsign, 1, 0);
inline
        
_settriple can be inlined into mpd_qpow with cost=180 (threshold=250) 
mpd_qpow
inline
        
_settriple inlined into mpd_qpow 
mpd_qpow
6604
        return;
6605
    }
6606
    if (_qcheck_pow_one(result, base, exp, resultsign, ctx, status) == 0) {
inline
        
_qcheck_pow_one can be inlined into mpd_qpow with cost=-13980 (threshold=250) 
mpd_qpow
inline
        
_qcheck_pow_one inlined into mpd_qpow 
mpd_qpow
6607
        return;
6608
    }
6609
    if (_qcheck_pow_bounds(result, base, exp, resultsign, ctx, status)) {
inline
        
_qcheck_pow_bounds can be inlined into mpd_qpow with cost=-12110 (threshold=250) 
mpd_qpow
inline
        
_qcheck_pow_bounds inlined into mpd_qpow 
mpd_qpow
6610
        return;
6611
    }
6612
6613
    if (intexp) {
6614
        _mpd_qpow_int(result, base, exp, resultsign, ctx, status);
inline
        
_mpd_qpow_int can be inlined into mpd_qpow with cost=-12020 (threshold=250) 
mpd_qpow
inline
        
_mpd_qpow_int inlined into mpd_qpow 
mpd_qpow
6615
    }
6616
    else {
6617
        _mpd_qpow_real(result, base, exp, ctx, status);
inline
        
_mpd_qpow_real can be inlined into mpd_qpow with cost=-14295 (threshold=250) 
mpd_qpow
inline
        
_mpd_qpow_real inlined into mpd_qpow 
mpd_qpow
6618
        if (!mpd_isspecial(result) && _mpd_cmp(result, &one) == 0) {
inline
                                      
_mpd_cmp too costly to inline (cost=550, threshold=250) 
mpd_qpow
inline
                                      
_mpd_cmp will not be inlined into mpd_qpow 
mpd_qpow
inline
             
mpd_isspecial should always be inlined (cost=always) 
mpd_qpow
inline
             
mpd_isspecial inlined into mpd_qpow 
mpd_qpow
6619
            mpd_ssize_t shift = ctx->prec-1;
gvn
                                     
load of type i64 not eliminated because it is clobbered by call 
mpd_qpow
6620
            mpd_qshiftl(result, &one, shift, status);
inline
            
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
mpd_qpow
inline
            
mpd_qshiftl will not be inlined into mpd_qpow 
mpd_qpow
6621
            result->exp = -shift;
6622
        }
6623
        if (mpd_isinfinite(result)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
mpd_qpow
inline
            
mpd_isinfinite inlined into mpd_qpow 
mpd_qpow
6624
            /* for ROUND_DOWN, ROUND_FLOOR, etc. */
6625
            _settriple(result, MPD_POS, 1, MPD_EXP_INF);
inline
            
_settriple can be inlined into mpd_qpow with cost=180 (threshold=250) 
mpd_qpow
inline
            
_settriple inlined into mpd_qpow 
mpd_qpow
6626
        }
6627
        mpd_qfinalize(result, ctx, status);
inline
        
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qpow
inline
        
mpd_qfinalize will not be inlined into mpd_qpow 
mpd_qpow
6628
    }
6629
}
6630
6631
/*
6632
 * Internal function: Integer powmod with mpd_uint_t exponent, base is modified!
6633
 * Function can fail with MPD_Malloc_error.
6634
 */
6635
static inline void
6636
_mpd_qpowmod_uint(mpd_t *result, mpd_t *base, mpd_uint_t exp,
6637
                  const mpd_t *mod, uint32_t *status)
6638
{
6639
    mpd_context_t maxcontext;
licm
    
hosting bitcast 
mpd_qpowmod
6640
6641
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into _mpd_qpowmod_uint because its definition is unavailable 
_mpd_qpowmod_uint
6642
6643
    /* resize to smaller cannot fail */
6644
    mpd_qcopy(result, &one, status);
inline
    
mpd_qcopy can be inlined into _mpd_qpowmod_uint with cost=215 (threshold=250) 
_mpd_qpowmod_uint
inline
    
mpd_qcopy inlined into _mpd_qpowmod_uint 
_mpd_qpowmod_uint
6645
6646
    while (exp > 0) {
loop-unroll
    
completely unrolled loop with 4 iterations 
_mpd_qpowmod_uint
6647
        if (exp & 1) {
6648
            _mpd_qmul_exact(result, result, base, &maxcontext, status);
inline
            
_mpd_qmul_exact too costly to inline (cost=265, threshold=250) 
_mpd_qpowmod_uint
inline
            
_mpd_qmul_exact will not be inlined into _mpd_qpowmod_uint 
_mpd_qpowmod_uint
inline
            
_mpd_qmul_exact too costly to inline (cost=265, threshold=250) 
mpd_qpowmod
inline
            
_mpd_qmul_exact will not be inlined into mpd_qpowmod 
mpd_qpowmod
6649
            mpd_qrem(result, result, mod, &maxcontext, status);
inline
            
mpd_qrem too costly to inline (cost=665, threshold=625) 
_mpd_qpowmod_uint
inline
            
mpd_qrem will not be inlined into _mpd_qpowmod_uint 
_mpd_qpowmod_uint
inline
            
mpd_qrem too costly to inline (cost=665, threshold=625) 
mpd_qpowmod
inline
            
mpd_qrem will not be inlined into mpd_qpowmod 
mpd_qpowmod
6650
        }
6651
        _mpd_qmul_exact(base, base, base, &maxcontext, status);
inline
        
_mpd_qmul_exact too costly to inline (cost=265, threshold=250) 
_mpd_qpowmod_uint
inline
        
_mpd_qmul_exact will not be inlined into _mpd_qpowmod_uint 
_mpd_qpowmod_uint
inline
        
_mpd_qmul_exact too costly to inline (cost=265, threshold=250) 
mpd_qpowmod
inline
        
_mpd_qmul_exact will not be inlined into mpd_qpowmod 
mpd_qpowmod
6652
        mpd_qrem(base, base, mod, &maxcontext, status);
inline
        
mpd_qrem too costly to inline (cost=665, threshold=625) 
_mpd_qpowmod_uint
inline
        
mpd_qrem will not be inlined into _mpd_qpowmod_uint 
_mpd_qpowmod_uint
inline
        
mpd_qrem too costly to inline (cost=665, threshold=625) 
mpd_qpowmod
inline
        
mpd_qrem will not be inlined into mpd_qpowmod 
mpd_qpowmod
6653
        exp >>= 1;
6654
    }
6655
}
6656
6657
/* The powmod function: (base**exp) % mod */
6658
void
6659
mpd_qpowmod(mpd_t *result, const mpd_t *base, const mpd_t *exp,
6660
            const mpd_t *mod,
6661
            const mpd_context_t *ctx, uint32_t *status)
6662
{
6663
    mpd_context_t maxcontext;
6664
    MPD_NEW_STATIC(tbase,0,0,0,0);
6665
    MPD_NEW_STATIC(texp,0,0,0,0);
6666
    MPD_NEW_STATIC(tmod,0,0,0,0);
6667
    MPD_NEW_STATIC(tmp,0,0,0,0);
6668
    MPD_NEW_CONST(two,0,0,1,1,1,2);
6669
    mpd_ssize_t tbase_exp, texp_exp;
6670
    mpd_ssize_t i;
6671
    mpd_t t;
6672
    mpd_uint_t r;
6673
    uint8_t sign;
6674
6675
6676
    if (mpd_isspecial(base) || mpd_isspecial(exp) || mpd_isspecial(mod)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_isspecial inlined into mpd_qpowmod 
mpd_qpowmod
inline
                               
mpd_isspecial should always be inlined (cost=always) 
mpd_qpowmod
inline
                               
mpd_isspecial inlined into mpd_qpowmod 
mpd_qpowmod
inline
                                                     
mpd_isspecial should always be inlined (cost=always) 
mpd_qpowmod
inline
                                                     
mpd_isspecial inlined into mpd_qpowmod 
mpd_qpowmod
6677
        if (mpd_qcheck_3nans(result, base, exp, mod, ctx, status)) {
inline
            
mpd_qcheck_3nans can be inlined into mpd_qpowmod with cost=-14565 (threshold=250) 
mpd_qpowmod
inline
            
mpd_qcheck_3nans inlined into mpd_qpowmod 
mpd_qpowmod
6678
            return;
6679
        }
6680
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qpowmod with cost=130 (threshold=250) 
mpd_qpowmod
inline
        
mpd_seterror inlined into mpd_qpowmod 
mpd_qpowmod
6681
        return;
6682
    }
6683
6684
6685
    if (!_mpd_isint(base) || !_mpd_isint(exp) || !_mpd_isint(mod)) {
inline
         
_mpd_isint can be inlined into mpd_qpowmod with cost=110 (threshold=250) 
mpd_qpowmod
inline
         
_mpd_isint inlined into mpd_qpowmod 
mpd_qpowmod
inline
                              
_mpd_isint can be inlined into mpd_qpowmod with cost=110 (threshold=250) 
mpd_qpowmod
inline
                              
_mpd_isint inlined into mpd_qpowmod 
mpd_qpowmod
inline
                                                  
_mpd_isint can be inlined into mpd_qpowmod with cost=110 (threshold=250) 
mpd_qpowmod
inline
                                                  
_mpd_isint inlined into mpd_qpowmod 
mpd_qpowmod
6686
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qpowmod with cost=130 (threshold=250) 
mpd_qpowmod
inline
        
mpd_seterror inlined into mpd_qpowmod 
mpd_qpowmod
6687
        return;
6688
    }
6689
    if (mpd_iszerocoeff(mod)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_iszerocoeff inlined into mpd_qpowmod 
mpd_qpowmod
6690
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qpowmod with cost=130 (threshold=250) 
mpd_qpowmod
inline
        
mpd_seterror inlined into mpd_qpowmod 
mpd_qpowmod
6691
        return;
6692
    }
6693
    if (mod->digits+mod->exp > ctx->prec) {
6694
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qpowmod with cost=130 (threshold=250) 
mpd_qpowmod
inline
        
mpd_seterror inlined into mpd_qpowmod 
mpd_qpowmod
6695
        return;
6696
    }
6697
6698
    sign = (mpd_isnegative(base)) && (mpd_isodd(exp));
inline
            
mpd_isnegative should always be inlined (cost=always) 
mpd_qpowmod
inline
            
mpd_isnegative inlined into mpd_qpowmod 
mpd_qpowmod
inline
                                      
mpd_isodd can be inlined into mpd_qpowmod with cost=95 (threshold=250) 
mpd_qpowmod
inline
                                      
mpd_isodd inlined into mpd_qpowmod 
mpd_qpowmod
6699
    if (mpd_iszerocoeff(exp)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_iszerocoeff inlined into mpd_qpowmod 
mpd_qpowmod
6700
        if (mpd_iszerocoeff(base)) {
inline
            
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qpowmod
inline
            
mpd_iszerocoeff inlined into mpd_qpowmod 
mpd_qpowmod
6701
            mpd_seterror(result, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into mpd_qpowmod with cost=130 (threshold=250) 
mpd_qpowmod
inline
            
mpd_seterror inlined into mpd_qpowmod 
mpd_qpowmod
6702
            return;
6703
        }
6704
        r = (_mpd_cmp_abs(mod, &one)==0) ? 0 : 1;
inline
             
_mpd_cmp_abs too costly to inline (cost=360, threshold=250) 
mpd_qpowmod
inline
             
_mpd_cmp_abs will not be inlined into mpd_qpowmod 
mpd_qpowmod
6705
        _settriple(result, sign, r, 0);
inline
        
_settriple can be inlined into mpd_qpowmod with cost=185 (threshold=250) 
mpd_qpowmod
inline
        
_settriple inlined into mpd_qpowmod 
mpd_qpowmod
6706
        return;
6707
    }
6708
    if (mpd_isnegative(exp)) {
inline
        
mpd_isnegative should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_isnegative inlined into mpd_qpowmod 
mpd_qpowmod
6709
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qpowmod with cost=130 (threshold=250) 
mpd_qpowmod
inline
        
mpd_seterror inlined into mpd_qpowmod 
mpd_qpowmod
6710
        return;
6711
    }
6712
    if (mpd_iszerocoeff(base)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_iszerocoeff inlined into mpd_qpowmod 
mpd_qpowmod
6713
        _settriple(result, sign, 0, 0);
inline
        
_settriple can be inlined into mpd_qpowmod with cost=180 (threshold=250) 
mpd_qpowmod
inline
        
_settriple inlined into mpd_qpowmod 
mpd_qpowmod
6714
        return;
6715
    }
6716
6717
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qpowmod because its definition is unavailable 
mpd_qpowmod
6718
6719
    mpd_qrescale(&tmod, mod, 0, &maxcontext, &maxcontext.status);
inline
    
mpd_qrescale can be inlined into mpd_qpowmod with cost=5 (threshold=375) 
mpd_qpowmod
inline
    
mpd_qrescale inlined into mpd_qpowmod 
mpd_qpowmod
6720
    if (maxcontext.status&MPD_Errors) {
gvn
                   
load of type i32 not eliminated because it is clobbered by call 
mpd_qpowmod
6721
        mpd_seterror(result, maxcontext.status&MPD_Errors, status);
inline
        
mpd_seterror can be inlined into mpd_qpowmod with cost=130 (threshold=250) 
mpd_qpowmod
inline
        
mpd_seterror inlined into mpd_qpowmod 
mpd_qpowmod
6722
        goto out;
6723
    }
6724
    maxcontext.status = 0;
6725
    mpd_set_positive(&tmod);
inline
    
mpd_set_positive should always be inlined (cost=always) 
mpd_qpowmod
inline
    
mpd_set_positive inlined into mpd_qpowmod 
mpd_qpowmod
6726
6727
    mpd_qround_to_int(&tbase, base, &maxcontext, status);
inline
    
mpd_qround_to_int can be inlined into mpd_qpowmod with cost=5 (threshold=375) 
mpd_qpowmod
inline
    
mpd_qround_to_int inlined into mpd_qpowmod 
mpd_qpowmod
6728
    mpd_set_positive(&tbase);
inline
    
mpd_set_positive should always be inlined (cost=always) 
mpd_qpowmod
inline
    
mpd_set_positive inlined into mpd_qpowmod 
mpd_qpowmod
6729
    tbase_exp = tbase.exp;
gvn
                      
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qpowmod
6730
    tbase.exp = 0;
6731
6732
    mpd_qround_to_int(&texp, exp, &maxcontext, status);
inline
    
mpd_qround_to_int can be inlined into mpd_qpowmod with cost=5 (threshold=375) 
mpd_qpowmod
inline
    
mpd_qround_to_int inlined into mpd_qpowmod 
mpd_qpowmod
6733
    texp_exp = texp.exp;
gvn
                    
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qpowmod
6734
    texp.exp = 0;
6735
6736
    /* base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo */
6737
    mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
inline
    
mpd_qrem too costly to inline (cost=665, threshold=625) 
mpd_qpowmod
inline
    
mpd_qrem will not be inlined into mpd_qpowmod 
mpd_qpowmod
6738
    mpd_qshiftl(result, &one, tbase_exp, status);
inline
    
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
mpd_qpowmod
inline
    
mpd_qshiftl will not be inlined into mpd_qpowmod 
mpd_qpowmod
6739
    mpd_qrem(result, result, &tmod, &maxcontext, status);
inline
    
mpd_qrem too costly to inline (cost=665, threshold=625) 
mpd_qpowmod
inline
    
mpd_qrem will not be inlined into mpd_qpowmod 
mpd_qpowmod
6740
    _mpd_qmul_exact(&tbase, &tbase, result, &maxcontext, status);
inline
    
_mpd_qmul_exact too costly to inline (cost=265, threshold=250) 
mpd_qpowmod
inline
    
_mpd_qmul_exact will not be inlined into mpd_qpowmod 
mpd_qpowmod
6741
    mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
inline
    
mpd_qrem too costly to inline (cost=665, threshold=625) 
mpd_qpowmod
inline
    
mpd_qrem will not be inlined into mpd_qpowmod 
mpd_qpowmod
6742
    if (mpd_isspecial(&tbase) ||
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_isspecial inlined into mpd_qpowmod 
mpd_qpowmod
6743
        mpd_isspecial(&texp) ||
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_isspecial inlined into mpd_qpowmod 
mpd_qpowmod
6744
        mpd_isspecial(&tmod)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_isspecial inlined into mpd_qpowmod 
mpd_qpowmod
6745
        goto mpd_errors;
6746
    }
6747
6748
    for (i = 0; i < texp_exp; i++) {
loop-vectorize
    
loop not vectorized 
mpd_qpowmod
6749
        _mpd_qpowmod_uint(&tmp, &tbase, 10, &tmod, status);
inline
        
_mpd_qpowmod_uint can be inlined into mpd_qpowmod with cost=-14135 (threshold=325) 
mpd_qpowmod
inline
        
_mpd_qpowmod_uint inlined into mpd_qpowmod 
mpd_qpowmod
6750
        t = tmp;
6751
        tmp = tbase;
6752
        tbase = t;
6753
    }
6754
    if (mpd_isspecial(&tbase)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_isspecial inlined into mpd_qpowmod 
mpd_qpowmod
6755
        goto mpd_errors; /* GCOV_UNLIKELY */
6756
    }
6757
6758
    /* resize to smaller cannot fail */
6759
    mpd_qcopy(result, &one, status);
inline
    
mpd_qcopy can be inlined into mpd_qpowmod with cost=215 (threshold=250) 
mpd_qpowmod
inline
    
mpd_qcopy inlined into mpd_qpowmod 
mpd_qpowmod
6760
    while (mpd_isfinite(&texp) && !mpd_iszero(&texp)) {
inline
                                   
mpd_iszero should always be inlined (cost=always) 
mpd_qpowmod
inline
                                   
mpd_iszero inlined into mpd_qpowmod 
mpd_qpowmod
inline
           
mpd_isfinite should always be inlined (cost=always) 
mpd_qpowmod
inline
           
mpd_isfinite inlined into mpd_qpowmod 
mpd_qpowmod
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qpowmod
loop-vectorize
    
loop not vectorized 
mpd_qpowmod
6761
        if (mpd_isodd(&texp)) {
inline
            
mpd_isodd can be inlined into mpd_qpowmod with cost=80 (threshold=250) 
mpd_qpowmod
inline
            
mpd_isodd inlined into mpd_qpowmod 
mpd_qpowmod
6762
            _mpd_qmul_exact(result, result, &tbase, &maxcontext, status);
inline
            
_mpd_qmul_exact too costly to inline (cost=265, threshold=250) 
mpd_qpowmod
inline
            
_mpd_qmul_exact will not be inlined into mpd_qpowmod 
mpd_qpowmod
6763
            mpd_qrem(result, result, &tmod, &maxcontext, status);
inline
            
mpd_qrem too costly to inline (cost=665, threshold=625) 
mpd_qpowmod
inline
            
mpd_qrem will not be inlined into mpd_qpowmod 
mpd_qpowmod
6764
        }
6765
        _mpd_qmul_exact(&tbase, &tbase, &tbase, &maxcontext, status);
inline
        
_mpd_qmul_exact too costly to inline (cost=265, threshold=250) 
mpd_qpowmod
inline
        
_mpd_qmul_exact will not be inlined into mpd_qpowmod 
mpd_qpowmod
6766
        mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
inline
        
mpd_qrem too costly to inline (cost=665, threshold=625) 
mpd_qpowmod
inline
        
mpd_qrem will not be inlined into mpd_qpowmod 
mpd_qpowmod
6767
        mpd_qdivint(&texp, &texp, &two, &maxcontext, status);
inline
        
mpd_qdivint too costly to inline (cost=670, threshold=625) 
mpd_qpowmod
inline
        
mpd_qdivint will not be inlined into mpd_qpowmod 
mpd_qpowmod
6768
    }
6769
    if (mpd_isspecial(&texp) || mpd_isspecial(&tbase) ||
inline
                                
mpd_isspecial should always be inlined (cost=always) 
mpd_qpowmod
inline
                                
mpd_isspecial inlined into mpd_qpowmod 
mpd_qpowmod
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_isspecial inlined into mpd_qpowmod 
mpd_qpowmod
6770
        mpd_isspecial(&tmod) || mpd_isspecial(result)) {
inline
                                
mpd_isspecial should always be inlined (cost=always) 
mpd_qpowmod
inline
                                
mpd_isspecial inlined into mpd_qpowmod 
mpd_qpowmod
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_isspecial inlined into mpd_qpowmod 
mpd_qpowmod
6771
        /* MPD_Malloc_error */
6772
        goto mpd_errors;
6773
    }
6774
    else {
6775
        mpd_set_sign(result, sign);
inline
        
mpd_set_sign should always be inlined (cost=always) 
mpd_qpowmod
inline
        
mpd_set_sign inlined into mpd_qpowmod 
mpd_qpowmod
6776
    }
6777
6778
out:
6779
    mpd_del(&tbase);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qpowmod
inline
    
mpd_del inlined into mpd_qpowmod 
mpd_qpowmod
6780
    mpd_del(&texp);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qpowmod
inline
    
mpd_del inlined into mpd_qpowmod 
mpd_qpowmod
6781
    mpd_del(&tmod);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qpowmod
inline
    
mpd_del inlined into mpd_qpowmod 
mpd_qpowmod
6782
    mpd_del(&tmp);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qpowmod
inline
    
mpd_del inlined into mpd_qpowmod 
mpd_qpowmod
6783
    return;
6784
6785
mpd_errors:
6786
    mpd_setspecial(result, MPD_POS, MPD_NAN);
inline
    
mpd_setspecial can be inlined into mpd_qpowmod with cost=115 (threshold=250) 
mpd_qpowmod
inline
    
mpd_setspecial inlined into mpd_qpowmod 
mpd_qpowmod
6787
    goto out;
6788
}
6789
6790
void
6791
mpd_qquantize(mpd_t *result, const mpd_t *a, const mpd_t *b,
6792
              const mpd_context_t *ctx, uint32_t *status)
6793
{
6794
    uint32_t workstatus = 0;
6795
    mpd_ssize_t b_exp = b->exp;
6796
    mpd_ssize_t expdiff, shift;
6797
    mpd_uint_t rnd;
6798
6799
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qquantize
inline
        
mpd_isspecial inlined into mpd_qquantize 
mpd_qquantize
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qquantize
inline
                            
mpd_isspecial inlined into mpd_qquantize 
mpd_qquantize
6800
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qquantize
inline
            
mpd_qcheck_nans will not be inlined into mpd_qquantize 
mpd_qquantize
6801
            return;
6802
        }
6803
        if (mpd_isinfinite(a) && mpd_isinfinite(b)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
mpd_qquantize
inline
            
mpd_isinfinite inlined into mpd_qquantize 
mpd_qquantize
inline
                                 
mpd_isinfinite should always be inlined (cost=always) 
mpd_qquantize
inline
                                 
mpd_isinfinite inlined into mpd_qquantize 
mpd_qquantize
6804
            mpd_qcopy(result, a, status);
inline
            
mpd_qcopy can be inlined into mpd_qquantize with cost=215 (threshold=250) 
mpd_qquantize
inline
            
mpd_qcopy inlined into mpd_qquantize 
mpd_qquantize
6805
            return;
6806
        }
6807
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qquantize with cost=130 (threshold=250) 
mpd_qquantize
inline
        
mpd_seterror inlined into mpd_qquantize 
mpd_qquantize
6808
        return;
6809
    }
6810
6811
    if (b->exp > ctx->emax || b->exp < mpd_etiny(ctx)) {
inline
                                       
mpd_etiny should always be inlined (cost=always) 
mpd_qquantize
inline
                                       
mpd_etiny inlined into mpd_qquantize 
mpd_qquantize
6812
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qquantize with cost=130 (threshold=250) 
mpd_qquantize
inline
        
mpd_seterror inlined into mpd_qquantize 
mpd_qquantize
6813
        return;
6814
    }
6815
6816
    if (mpd_iszero(a)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
mpd_qquantize
inline
        
mpd_iszero inlined into mpd_qquantize 
mpd_qquantize
6817
        _settriple(result, mpd_sign(a), 0, b->exp);
inline
                           
mpd_sign should always be inlined (cost=always) 
mpd_qquantize
inline
                           
mpd_sign inlined into mpd_qquantize 
mpd_qquantize
inline
        
_settriple can be inlined into mpd_qquantize with cost=180 (threshold=250) 
mpd_qquantize
inline
        
_settriple inlined into mpd_qquantize 
mpd_qquantize
gvn
                                              
load of type i64 eliminated in favor of load 
mpd_qquantize
6818
        mpd_qfinalize(result, ctx, status);
inline
        
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qquantize
inline
        
mpd_qfinalize will not be inlined into mpd_qquantize 
mpd_qquantize
6819
        return;
6820
    }
6821
6822
6823
    expdiff = a->exp - b->exp;
gvn
                          
load of type i64 eliminated in favor of load 
mpd_qquantize
6824
    if (a->digits + expdiff > ctx->prec) {
gvn
                                   
load of type i64 eliminated in favor of load 
mpd_qquantize
6825
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qquantize with cost=130 (threshold=250) 
mpd_qquantize
inline
        
mpd_seterror inlined into mpd_qquantize 
mpd_qquantize
6826
        return;
6827
    }
6828
6829
    if (expdiff >= 0) {
6830
        shift = expdiff;
6831
        if (!mpd_qshiftl(result, a, shift, status)) {
inline
             
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
mpd_qquantize
inline
             
mpd_qshiftl will not be inlined into mpd_qquantize 
mpd_qquantize
6832
            return;
6833
        }
6834
        result->exp = b_exp;
6835
    }
6836
    else {
6837
        /* At this point expdiff < 0 and a->digits+expdiff <= prec,
6838
         * so the shift before an increment will fit in prec. */
6839
        shift = -expdiff;
6840
        rnd = mpd_qshiftr(result, a, shift, status);
inline
              
mpd_qshiftr too costly to inline (cost=630, threshold=625) 
mpd_qquantize
inline
              
mpd_qshiftr will not be inlined into mpd_qquantize 
mpd_qquantize
6841
        if (rnd == MPD_UINT_MAX) {
6842
            return;
6843
        }
6844
        result->exp = b_exp;
6845
        if (!_mpd_apply_round_fit(result, rnd, ctx, status)) {
inline
             
_mpd_apply_round_fit can be inlined into mpd_qquantize with cost=-14055 (threshold=325) 
mpd_qquantize
inline
             
_mpd_apply_round_fit inlined into mpd_qquantize 
mpd_qquantize
6846
            return;
6847
        }
6848
        workstatus |= MPD_Rounded;
6849
        if (rnd) {
6850
            workstatus |= MPD_Inexact;
6851
        }
6852
    }
6853
6854
    if (mpd_adjexp(result) > ctx->emax ||
inline
        
mpd_adjexp should always be inlined (cost=always) 
mpd_qquantize
inline
        
mpd_adjexp inlined into mpd_qquantize 
mpd_qquantize
gvn
                                  
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qquantize
6855
        mpd_adjexp(result) < mpd_etiny(ctx)) {
inline
                             
mpd_etiny should always be inlined (cost=always) 
mpd_qquantize
inline
                             
mpd_etiny inlined into mpd_qquantize 
mpd_qquantize
inline
        
mpd_adjexp should always be inlined (cost=always) 
mpd_qquantize
inline
        
mpd_adjexp inlined into mpd_qquantize 
mpd_qquantize
6856
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qquantize with cost=130 (threshold=250) 
mpd_qquantize
inline
        
mpd_seterror inlined into mpd_qquantize 
mpd_qquantize
6857
        return;
6858
    }
6859
6860
    *status |= workstatus;
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qquantize
6861
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qquantize
inline
    
mpd_qfinalize will not be inlined into mpd_qquantize 
mpd_qquantize
6862
}
6863
6864
void
6865
mpd_qreduce(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
6866
            uint32_t *status)
6867
{
6868
    mpd_ssize_t shift, maxexp, maxshift;
6869
    uint8_t sign_a = mpd_sign(a);
inline
                     
mpd_sign should always be inlined (cost=always) 
mpd_qreduce
inline
                     
mpd_sign inlined into mpd_qreduce 
mpd_qreduce
6870
6871
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qreduce
inline
        
mpd_isspecial inlined into mpd_qreduce 
mpd_qreduce
6872
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qreduce
inline
            
mpd_qcheck_nan will not be inlined into mpd_qreduce 
mpd_qreduce
6873
            return;
6874
        }
6875
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into mpd_qreduce with cost=215 (threshold=250) 
mpd_qreduce
inline
        
mpd_qcopy inlined into mpd_qreduce 
mpd_qreduce
6876
        return;
6877
    }
6878
6879
    if (!mpd_qcopy(result, a, status)) {
inline
         
mpd_qcopy can be inlined into mpd_qreduce with cost=215 (threshold=250) 
mpd_qreduce
inline
         
mpd_qcopy inlined into mpd_qreduce 
mpd_qreduce
6880
        return;
6881
    }
6882
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qreduce
inline
    
mpd_qfinalize will not be inlined into mpd_qreduce 
mpd_qreduce
6883
    if (mpd_isspecial(result)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qreduce
inline
        
mpd_isspecial inlined into mpd_qreduce 
mpd_qreduce
6884
        return;
6885
    }
6886
    if (mpd_iszero(result)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
mpd_qreduce
inline
        
mpd_iszero inlined into mpd_qreduce 
mpd_qreduce
6887
        _settriple(result, sign_a, 0, 0);
inline
        
_settriple can be inlined into mpd_qreduce with cost=180 (threshold=250) 
mpd_qreduce
inline
        
_settriple inlined into mpd_qreduce 
mpd_qreduce
6888
        return;
6889
    }
6890
6891
    shift = mpd_trail_zeros(result);
inline
            
mpd_trail_zeros can be inlined into mpd_qreduce with cost=65 (threshold=250) 
mpd_qreduce
inline
            
mpd_trail_zeros inlined into mpd_qreduce 
mpd_qreduce
6892
    maxexp = (ctx->clamp) ? mpd_etop(ctx) : ctx->emax;
inline
                            
mpd_etop should always be inlined (cost=always) 
mpd_qreduce
inline
                            
mpd_etop inlined into mpd_qreduce 
mpd_qreduce
gvn
                   
load of type i32 not eliminated because it is clobbered by call 
mpd_qreduce
6893
    /* After the finalizing above result->exp <= maxexp. */
6894
    maxshift = maxexp - result->exp;
gvn
                                
load of type i64 not eliminated because it is clobbered by call 
mpd_qreduce
6895
    shift = (shift > maxshift) ? maxshift : shift;
6896
6897
    mpd_qshiftr_inplace(result, shift);
inline
    
mpd_qshiftr_inplace too costly to inline (cost=475, threshold=250) 
mpd_qreduce
inline
    
mpd_qshiftr_inplace will not be inlined into mpd_qreduce 
mpd_qreduce
6898
    result->exp += shift;
gvn
                
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qreduce
6899
}
6900
6901
void
6902
mpd_qrem(mpd_t *r, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx,
6903
         uint32_t *status)
6904
{
6905
    MPD_NEW_STATIC(q,0,0,0,0);
6906
6907
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qrem
inline
        
mpd_isspecial inlined into mpd_qrem 
mpd_qrem
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qrem
inline
                            
mpd_isspecial inlined into mpd_qrem 
mpd_qrem
6908
        if (mpd_qcheck_nans(r, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qrem
inline
            
mpd_qcheck_nans will not be inlined into mpd_qrem 
mpd_qrem
6909
            return;
6910
        }
6911
        if (mpd_isinfinite(a)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
mpd_qrem
inline
            
mpd_isinfinite inlined into mpd_qrem 
mpd_qrem
6912
            mpd_seterror(r, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into mpd_qrem with cost=130 (threshold=250) 
mpd_qrem
inline
            
mpd_seterror inlined into mpd_qrem 
mpd_qrem
6913
            return;
6914
        }
6915
        if (mpd_isinfinite(b)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
mpd_qrem
inline
            
mpd_isinfinite inlined into mpd_qrem 
mpd_qrem
6916
            mpd_qcopy(r, a, status);
inline
            
mpd_qcopy can be inlined into mpd_qrem with cost=215 (threshold=250) 
mpd_qrem
inline
            
mpd_qcopy inlined into mpd_qrem 
mpd_qrem
6917
            mpd_qfinalize(r, ctx, status);
inline
            
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qrem
inline
            
mpd_qfinalize will not be inlined into mpd_qrem 
mpd_qrem
6918
            return;
6919
        }
6920
        /* debug */
6921
        abort(); /* GCOV_NOT_REACHED */
inline
        
abort will not be inlined into mpd_qrem because its definition is unavailable 
mpd_qrem
6922
    }
6923
    if (mpd_iszerocoeff(b)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qrem
inline
        
mpd_iszerocoeff inlined into mpd_qrem 
mpd_qrem
6924
        if (mpd_iszerocoeff(a)) {
inline
            
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qrem
inline
            
mpd_iszerocoeff inlined into mpd_qrem 
mpd_qrem
6925
            mpd_seterror(r, MPD_Division_undefined, status);
inline
            
mpd_seterror can be inlined into mpd_qrem with cost=130 (threshold=250) 
mpd_qrem
inline
            
mpd_seterror inlined into mpd_qrem 
mpd_qrem
6926
        }
6927
        else {
6928
            mpd_seterror(r, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into mpd_qrem with cost=130 (threshold=250) 
mpd_qrem
inline
            
mpd_seterror inlined into mpd_qrem 
mpd_qrem
6929
        }
6930
        return;
6931
    }
6932
6933
    _mpd_qdivmod(&q, r, a, b, ctx, status);
inline
    
_mpd_qdivmod too costly to inline (cost=645, threshold=625) 
mpd_qrem
inline
    
_mpd_qdivmod will not be inlined into mpd_qrem 
mpd_qrem
6934
    mpd_del(&q);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qrem
inline
    
mpd_del inlined into mpd_qrem 
mpd_qrem
6935
    mpd_qfinalize(r, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qrem
inline
    
mpd_qfinalize will not be inlined into mpd_qrem 
mpd_qrem
6936
}
6937
6938
void
6939
mpd_qrem_near(mpd_t *r, const mpd_t *a, const mpd_t *b,
6940
              const mpd_context_t *ctx, uint32_t *status)
6941
{
6942
    mpd_context_t workctx;
6943
    MPD_NEW_STATIC(btmp,0,0,0,0);
6944
    MPD_NEW_STATIC(q,0,0,0,0);
6945
    mpd_ssize_t expdiff, qdigits;
6946
    int cmp, isodd, allnine;
6947
6948
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qrem_near
inline
        
mpd_isspecial inlined into mpd_qrem_near 
mpd_qrem_near
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_qrem_near
inline
                            
mpd_isspecial inlined into mpd_qrem_near 
mpd_qrem_near
6949
        if (mpd_qcheck_nans(r, a, b, ctx, status)) {
inline
            
mpd_qcheck_nans too costly to inline (cost=385, threshold=250) 
mpd_qrem_near
inline
            
mpd_qcheck_nans will not be inlined into mpd_qrem_near 
mpd_qrem_near
6950
            return;
6951
        }
6952
        if (mpd_isinfinite(a)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
mpd_qrem_near
inline
            
mpd_isinfinite inlined into mpd_qrem_near 
mpd_qrem_near
6953
            mpd_seterror(r, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into mpd_qrem_near with cost=130 (threshold=250) 
mpd_qrem_near
inline
            
mpd_seterror inlined into mpd_qrem_near 
mpd_qrem_near
6954
            return;
6955
        }
6956
        if (mpd_isinfinite(b)) {
inline
            
mpd_isinfinite should always be inlined (cost=always) 
mpd_qrem_near
inline
            
mpd_isinfinite inlined into mpd_qrem_near 
mpd_qrem_near
6957
            mpd_qcopy(r, a, status);
inline
            
mpd_qcopy can be inlined into mpd_qrem_near with cost=215 (threshold=250) 
mpd_qrem_near
inline
            
mpd_qcopy inlined into mpd_qrem_near 
mpd_qrem_near
6958
            mpd_qfinalize(r, ctx, status);
inline
            
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qrem_near
inline
            
mpd_qfinalize will not be inlined into mpd_qrem_near 
mpd_qrem_near
6959
            return;
6960
        }
6961
        /* debug */
6962
        abort(); /* GCOV_NOT_REACHED */
inline
        
abort will not be inlined into mpd_qrem_near because its definition is unavailable 
mpd_qrem_near
6963
    }
6964
    if (mpd_iszerocoeff(b)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qrem_near
inline
        
mpd_iszerocoeff inlined into mpd_qrem_near 
mpd_qrem_near
6965
        if (mpd_iszerocoeff(a)) {
inline
            
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qrem_near
inline
            
mpd_iszerocoeff inlined into mpd_qrem_near 
mpd_qrem_near
6966
            mpd_seterror(r,  MPD_Division_undefined, status);
inline
            
mpd_seterror can be inlined into mpd_qrem_near with cost=130 (threshold=250) 
mpd_qrem_near
inline
            
mpd_seterror inlined into mpd_qrem_near 
mpd_qrem_near
6967
        }
6968
        else {
6969
            mpd_seterror(r,  MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into mpd_qrem_near with cost=130 (threshold=250) 
mpd_qrem_near
inline
            
mpd_seterror inlined into mpd_qrem_near 
mpd_qrem_near
6970
        }
6971
        return;
6972
    }
6973
6974
    if (r == b) {
6975
        if (!mpd_qcopy(&btmp, b, status)) {
inline
             
mpd_qcopy can be inlined into mpd_qrem_near with cost=215 (threshold=250) 
mpd_qrem_near
inline
             
mpd_qcopy inlined into mpd_qrem_near 
mpd_qrem_near
6976
            mpd_seterror(r, MPD_Malloc_error, status);
inline
            
mpd_seterror can be inlined into mpd_qrem_near with cost=130 (threshold=250) 
mpd_qrem_near
inline
            
mpd_seterror inlined into mpd_qrem_near 
mpd_qrem_near
6977
            return;
6978
        }
6979
        b = &btmp;
6980
    }
6981
6982
    _mpd_qdivmod(&q, r, a, b, ctx, status);
inline
    
_mpd_qdivmod too costly to inline (cost=645, threshold=625) 
mpd_qrem_near
inline
    
_mpd_qdivmod will not be inlined into mpd_qrem_near 
mpd_qrem_near
6983
    if (mpd_isnan(&q) || mpd_isnan(r)) {
inline
        
mpd_isnan should always be inlined (cost=always) 
mpd_qrem_near
inline
        
mpd_isnan inlined into mpd_qrem_near 
mpd_qrem_near
inline
                         
mpd_isnan should always be inlined (cost=always) 
mpd_qrem_near
inline
                         
mpd_isnan inlined into mpd_qrem_near 
mpd_qrem_near
6984
        goto finish;
6985
    }
6986
    if (mpd_iszerocoeff(r)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
mpd_qrem_near
inline
        
mpd_iszerocoeff inlined into mpd_qrem_near 
mpd_qrem_near
6987
        goto finish;
6988
    }
6989
6990
    expdiff = mpd_adjexp(b) - mpd_adjexp(r);
inline
              
mpd_adjexp should always be inlined (cost=always) 
mpd_qrem_near
inline
              
mpd_adjexp inlined into mpd_qrem_near 
mpd_qrem_near
inline
                              
mpd_adjexp should always be inlined (cost=always) 
mpd_qrem_near
inline
                              
mpd_adjexp inlined into mpd_qrem_near 
mpd_qrem_near
6991
    if (-1 <= expdiff && expdiff <= 1) {
6992
6993
        allnine = mpd_coeff_isallnine(&q);
inline
                  
mpd_coeff_isallnine can be inlined into mpd_qrem_near with cost=-14680 (threshold=250) 
mpd_qrem_near
inline
                  
mpd_coeff_isallnine inlined into mpd_qrem_near 
mpd_qrem_near
6994
        qdigits = q.digits;
gvn
                    
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qrem_near
6995
        isodd = mpd_isodd(&q);
inline
                
mpd_isodd can be inlined into mpd_qrem_near with cost=80 (threshold=250) 
mpd_qrem_near
inline
                
mpd_isodd inlined into mpd_qrem_near 
mpd_qrem_near
6996
6997
        mpd_maxcontext(&workctx);
inline
        
mpd_maxcontext will not be inlined into mpd_qrem_near because its definition is unavailable 
mpd_qrem_near
6998
        if (mpd_sign(a) == mpd_sign(b)) {
inline
            
mpd_sign should always be inlined (cost=always) 
mpd_qrem_near
inline
            
mpd_sign inlined into mpd_qrem_near 
mpd_qrem_near
inline
                           
mpd_sign should always be inlined (cost=always) 
mpd_qrem_near
inline
                           
mpd_sign inlined into mpd_qrem_near 
mpd_qrem_near
6999
            /* sign(r) == sign(b) */
7000
            _mpd_qsub(&q, r, b, &workctx, &workctx.status);
inline
            
_mpd_qsub can be inlined into mpd_qrem_near with cost=-14980 (threshold=375) 
mpd_qrem_near
inline
            
_mpd_qsub inlined into mpd_qrem_near 
mpd_qrem_near
7001
        }
7002
        else {
7003
            /* sign(r) != sign(b) */
7004
            _mpd_qadd(&q, r, b, &workctx, &workctx.status);
inline
            
_mpd_qadd can be inlined into mpd_qrem_near with cost=-14985 (threshold=375) 
mpd_qrem_near
inline
            
_mpd_qadd inlined into mpd_qrem_near 
mpd_qrem_near
7005
        }
7006
7007
        if (workctx.status&MPD_Errors) {
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
mpd_qrem_near
7008
            mpd_seterror(r, workctx.status&MPD_Errors, status);
inline
            
mpd_seterror can be inlined into mpd_qrem_near with cost=130 (threshold=250) 
mpd_qrem_near
inline
            
mpd_seterror inlined into mpd_qrem_near 
mpd_qrem_near
7009
            goto finish;
7010
        }
7011
7012
        cmp = _mpd_cmp_abs(&q, r);
inline
              
_mpd_cmp_abs too costly to inline (cost=360, threshold=250) 
mpd_qrem_near
inline
              
_mpd_cmp_abs will not be inlined into mpd_qrem_near 
mpd_qrem_near
7013
        if (cmp < 0 || (cmp == 0 && isodd)) {
7014
            /* abs(r) > abs(b)/2 or abs(r) == abs(b)/2 and isodd(quotient) */
7015
            if (allnine && qdigits == ctx->prec) {
gvn
                                           
load of type i64 not eliminated because it is clobbered by call 
mpd_qrem_near
7016
                /* abs(quotient) + 1 == 10**prec */
7017
                mpd_seterror(r, MPD_Division_impossible, status);
inline
                
mpd_seterror can be inlined into mpd_qrem_near with cost=130 (threshold=250) 
mpd_qrem_near
inline
                
mpd_seterror inlined into mpd_qrem_near 
mpd_qrem_near
7018
                goto finish;
7019
            }
7020
            mpd_qcopy(r, &q, status);
inline
            
mpd_qcopy can be inlined into mpd_qrem_near with cost=180 (threshold=250) 
mpd_qrem_near
inline
            
mpd_qcopy inlined into mpd_qrem_near 
mpd_qrem_near
7021
        }
7022
    }
7023
7024
7025
finish:
7026
    mpd_del(&btmp);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qrem_near
inline
    
mpd_del inlined into mpd_qrem_near 
mpd_qrem_near
7027
    mpd_del(&q);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qrem_near
inline
    
mpd_del inlined into mpd_qrem_near 
mpd_qrem_near
7028
    mpd_qfinalize(r, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qrem_near
inline
    
mpd_qfinalize will not be inlined into mpd_qrem_near 
mpd_qrem_near
7029
}
7030
7031
static void
7032
_mpd_qrescale(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
7033
              const mpd_context_t *ctx, uint32_t *status)
7034
{
7035
    mpd_ssize_t expdiff, shift;
7036
    mpd_uint_t rnd;
7037
7038
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
_mpd_qrescale
inline
        
mpd_isspecial inlined into _mpd_qrescale 
_mpd_qrescale
7039
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into _mpd_qrescale with cost=215 (threshold=250) 
_mpd_qrescale
inline
        
mpd_qcopy inlined into _mpd_qrescale 
_mpd_qrescale
7040
        return;
7041
    }
7042
7043
    if (mpd_iszero(a)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
_mpd_qrescale
inline
        
mpd_iszero inlined into _mpd_qrescale 
_mpd_qrescale
7044
        _settriple(result, mpd_sign(a), 0, exp);
inline
                           
mpd_sign should always be inlined (cost=always) 
_mpd_qrescale
inline
                           
mpd_sign inlined into _mpd_qrescale 
_mpd_qrescale
inline
        
_settriple can be inlined into _mpd_qrescale with cost=180 (threshold=250) 
_mpd_qrescale
inline
        
_settriple inlined into _mpd_qrescale 
_mpd_qrescale
7045
        return;
7046
    }
7047
7048
    expdiff = a->exp - exp;
7049
    if (expdiff >= 0) {
7050
        shift = expdiff;
7051
        if (a->digits + shift > MPD_MAX_PREC+1) {
7052
            mpd_seterror(result, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into _mpd_qrescale with cost=130 (threshold=250) 
_mpd_qrescale
inline
            
mpd_seterror inlined into _mpd_qrescale 
_mpd_qrescale
7053
            return;
7054
        }
7055
        if (!mpd_qshiftl(result, a, shift, status)) {
inline
             
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
_mpd_qrescale
inline
             
mpd_qshiftl will not be inlined into _mpd_qrescale 
_mpd_qrescale
7056
            return;
7057
        }
7058
        result->exp = exp;
7059
    }
7060
    else {
7061
        shift = -expdiff;
7062
        rnd = mpd_qshiftr(result, a, shift, status);
inline
              
mpd_qshiftr too costly to inline (cost=630, threshold=625) 
_mpd_qrescale
inline
              
mpd_qshiftr will not be inlined into _mpd_qrescale 
_mpd_qrescale
7063
        if (rnd == MPD_UINT_MAX) {
7064
            return;
7065
        }
7066
        result->exp = exp;
7067
        _mpd_apply_round_excess(result, rnd, ctx, status);
inline
        
_mpd_apply_round_excess too costly to inline (cost=755, threshold=325) 
_mpd_qrescale
inline
        
_mpd_apply_round_excess will not be inlined into _mpd_qrescale 
_mpd_qrescale
7068
        *status |= MPD_Rounded;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
_mpd_qrescale
7069
        if (rnd) {
7070
            *status |= MPD_Inexact;
7071
        }
7072
    }
7073
7074
    if (mpd_issubnormal(result, ctx)) {
inline
        
mpd_issubnormal can be inlined into _mpd_qrescale with cost=45 (threshold=325) 
_mpd_qrescale
inline
        
mpd_issubnormal inlined into _mpd_qrescale 
_mpd_qrescale
7075
        *status |= MPD_Subnormal;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
_mpd_qrescale
7076
    }
7077
}
7078
7079
/*
7080
 * Rescale a number so that it has exponent 'exp'. Does not regard context
7081
 * precision, emax, emin, but uses the rounding mode. Special numbers are
7082
 * quietly copied. Restrictions:
7083
 *
7084
 *     MPD_MIN_ETINY <= exp <= MPD_MAX_EMAX+1
7085
 *     result->digits <= MPD_MAX_PREC+1
7086
 */
7087
void
7088
mpd_qrescale(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
7089
             const mpd_context_t *ctx, uint32_t *status)
7090
{
7091
    if (exp > MPD_MAX_EMAX+1 || exp < MPD_MIN_ETINY) {
7092
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qrescale with cost=130 (threshold=250) 
mpd_qrescale
inline
        
mpd_seterror inlined into mpd_qrescale 
mpd_qrescale
7093
        return;
7094
    }
7095
7096
    _mpd_qrescale(result, a, exp, ctx, status);
inline
    
_mpd_qrescale too costly to inline (cost=630, threshold=625) 
mpd_qrescale
inline
    
_mpd_qrescale will not be inlined into mpd_qrescale 
mpd_qrescale
inline
    
_mpd_qrescale too costly to inline (cost=630, threshold=625) 
mpd_qpowmod
inline
    
_mpd_qrescale will not be inlined into mpd_qpowmod 
mpd_qpowmod
7097
}
7098
7099
/*
7100
 * Same as mpd_qrescale, but with relaxed restrictions. The result of this
7101
 * function should only be used for formatting a number and never as input
7102
 * for other operations.
7103
 *
7104
 *     MPD_MIN_ETINY-MPD_MAX_PREC <= exp <= MPD_MAX_EMAX+1
7105
 *     result->digits <= MPD_MAX_PREC+1
7106
 */
7107
void
7108
mpd_qrescale_fmt(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
7109
                 const mpd_context_t *ctx, uint32_t *status)
7110
{
7111
    if (exp > MPD_MAX_EMAX+1 || exp < MPD_MIN_ETINY-MPD_MAX_PREC) {
7112
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qrescale_fmt with cost=130 (threshold=250) 
mpd_qrescale_fmt
inline
        
mpd_seterror inlined into mpd_qrescale_fmt 
mpd_qrescale_fmt
7113
        return;
7114
    }
7115
7116
    _mpd_qrescale(result, a, exp, ctx, status);
inline
    
_mpd_qrescale too costly to inline (cost=630, threshold=625) 
mpd_qrescale_fmt
inline
    
_mpd_qrescale will not be inlined into mpd_qrescale_fmt 
mpd_qrescale_fmt
7117
}
7118
7119
/* Round to an integer according to 'action' and ctx->round. */
7120
enum {TO_INT_EXACT, TO_INT_SILENT, TO_INT_TRUNC};
7121
static void
7122
_mpd_qround_to_integral(int action, mpd_t *result, const mpd_t *a,
7123
                        const mpd_context_t *ctx, uint32_t *status)
7124
{
7125
    mpd_uint_t rnd;
7126
7127
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
_mpd_qround_to_integral
inline
        
mpd_isspecial inlined into _mpd_qround_to_integral 
_mpd_qround_to_integral
7128
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
_mpd_qround_to_integral
inline
            
mpd_qcheck_nan will not be inlined into _mpd_qround_to_integral 
_mpd_qround_to_integral
7129
            return;
7130
        }
7131
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into _mpd_qround_to_integral with cost=215 (threshold=250) 
_mpd_qround_to_integral
inline
        
mpd_qcopy inlined into _mpd_qround_to_integral 
_mpd_qround_to_integral
7132
        return;
7133
    }
7134
    if (a->exp >= 0) {
7135
        mpd_qcopy(result, a, status);
inline
        
mpd_qcopy can be inlined into _mpd_qround_to_integral with cost=215 (threshold=250) 
_mpd_qround_to_integral
inline
        
mpd_qcopy inlined into _mpd_qround_to_integral 
_mpd_qround_to_integral
7136
        return;
7137
    }
7138
    if (mpd_iszerocoeff(a)) {
inline
        
mpd_iszerocoeff should always be inlined (cost=always) 
_mpd_qround_to_integral
inline
        
mpd_iszerocoeff inlined into _mpd_qround_to_integral 
_mpd_qround_to_integral
7139
        _settriple(result, mpd_sign(a), 0, 0);
inline
                           
mpd_sign should always be inlined (cost=always) 
_mpd_qround_to_integral
inline
                           
mpd_sign inlined into _mpd_qround_to_integral 
_mpd_qround_to_integral
inline
        
_settriple can be inlined into _mpd_qround_to_integral with cost=180 (threshold=250) 
_mpd_qround_to_integral
inline
        
_settriple inlined into _mpd_qround_to_integral 
_mpd_qround_to_integral
7140
        return;
7141
    }
7142
7143
    rnd = mpd_qshiftr(result, a, -a->exp, status);
inline
          
mpd_qshiftr too costly to inline (cost=630, threshold=625) 
_mpd_qround_to_integral
inline
          
mpd_qshiftr will not be inlined into _mpd_qround_to_integral 
_mpd_qround_to_integral
7144
    if (rnd == MPD_UINT_MAX) {
7145
        return;
7146
    }
7147
    result->exp = 0;
7148
7149
    if (action == TO_INT_EXACT || action == TO_INT_SILENT) {
7150
        _mpd_apply_round_excess(result, rnd, ctx, status);
inline
        
_mpd_apply_round_excess too costly to inline (cost=755, threshold=325) 
_mpd_qround_to_integral
inline
        
_mpd_apply_round_excess will not be inlined into _mpd_qround_to_integral 
_mpd_qround_to_integral
7151
        if (action == TO_INT_EXACT) {
7152
            *status |= MPD_Rounded;
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
_mpd_qround_to_integral
7153
            if (rnd) {
7154
                *status |= MPD_Inexact;
7155
            }
7156
        }
7157
    }
7158
}
7159
7160
void
7161
mpd_qround_to_intx(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7162
                   uint32_t *status)
7163
{
7164
    (void)_mpd_qround_to_integral(TO_INT_EXACT, result, a, ctx, status);
inline
          
_mpd_qround_to_integral too costly to inline (cost=630, threshold=625) 
mpd_qround_to_intx
inline
          
_mpd_qround_to_integral will not be inlined into mpd_qround_to_intx 
mpd_qround_to_intx
7165
}
7166
7167
void
7168
mpd_qround_to_int(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7169
                  uint32_t *status)
7170
{
7171
    (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a, ctx, status);
inline
          
_mpd_qround_to_integral too costly to inline (cost=630, threshold=625) 
mpd_qround_to_int
inline
          
_mpd_qround_to_integral will not be inlined into mpd_qround_to_int 
mpd_qround_to_int
inline
          
_mpd_qround_to_integral too costly to inline (cost=630, threshold=625) 
mpd_qpowmod
inline
          
_mpd_qround_to_integral will not be inlined into mpd_qpowmod 
mpd_qpowmod
7172
}
7173
7174
void
7175
mpd_qtrunc(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7176
           uint32_t *status)
7177
{
7178
    (void)_mpd_qround_to_integral(TO_INT_TRUNC, result, a, ctx, status);
inline
          
_mpd_qround_to_integral too costly to inline (cost=630, threshold=625) 
mpd_qtrunc
inline
          
_mpd_qround_to_integral will not be inlined into mpd_qtrunc 
mpd_qtrunc
inline
          
_mpd_qround_to_integral too costly to inline (cost=615, threshold=250) 
_mpd_base_ndivmod
inline
          
_mpd_qround_to_integral will not be inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7179
}
7180
7181
void
7182
mpd_qfloor(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7183
           uint32_t *status)
7184
{
7185
    mpd_context_t workctx = *ctx;
7186
    workctx.round = MPD_ROUND_FLOOR;
7187
    (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a,
inline
          
_mpd_qround_to_integral too costly to inline (cost=630, threshold=625) 
mpd_qfloor
inline
          
_mpd_qround_to_integral will not be inlined into mpd_qfloor 
mpd_qfloor
7188
                                  &workctx, status);
7189
}
7190
7191
void
7192
mpd_qceil(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7193
          uint32_t *status)
7194
{
7195
    mpd_context_t workctx = *ctx;
7196
    workctx.round = MPD_ROUND_CEILING;
7197
    (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a,
inline
          
_mpd_qround_to_integral too costly to inline (cost=630, threshold=625) 
mpd_qceil
inline
          
_mpd_qround_to_integral will not be inlined into mpd_qceil 
mpd_qceil
7198
                                  &workctx, status);
7199
}
7200
7201
int
7202
mpd_same_quantum(const mpd_t *a, const mpd_t *b)
7203
{
7204
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_same_quantum
inline
        
mpd_isspecial inlined into mpd_same_quantum 
mpd_same_quantum
inline
                            
mpd_isspecial should always be inlined (cost=always) 
mpd_same_quantum
inline
                            
mpd_isspecial inlined into mpd_same_quantum 
mpd_same_quantum
7205
        return ((mpd_isnan(a) && mpd_isnan(b)) ||
inline
                                 
mpd_isnan should always be inlined (cost=always) 
mpd_same_quantum
inline
                                 
mpd_isnan inlined into mpd_same_quantum 
mpd_same_quantum
inline
                 
mpd_isnan should always be inlined (cost=always) 
mpd_same_quantum
inline
                 
mpd_isnan inlined into mpd_same_quantum 
mpd_same_quantum
7206
                (mpd_isinfinite(a) && mpd_isinfinite(b)));
inline
                                      
mpd_isinfinite should always be inlined (cost=always) 
mpd_same_quantum
inline
                                      
mpd_isinfinite inlined into mpd_same_quantum 
mpd_same_quantum
inline
                 
mpd_isinfinite should always be inlined (cost=always) 
mpd_same_quantum
inline
                 
mpd_isinfinite inlined into mpd_same_quantum 
mpd_same_quantum
7207
    }
7208
7209
    return a->exp == b->exp;
7210
}
7211
7212
/* Schedule the increase in precision for the Newton iteration. */
7213
static inline int
7214
recpr_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2],
7215
                    mpd_ssize_t maxprec, mpd_ssize_t initprec)
7216
{
7217
    mpd_ssize_t k;
7218
    int i;
7219
7220
    assert(maxprec > 0 && initprec > 0);
7221
    if (maxprec <= initprec) return -1;
7222
7223
    i = 0; k = maxprec;
7224
    do {
loop-vectorize
    
loop not vectorized: could not determine number of loop iterations 
_mpd_base_ndivmod
loop-vectorize
    
loop not vectorized 
_mpd_base_ndivmod
7225
        k = (k+1) / 2;
7226
        klist[i++] = k;
7227
    } while (k > initprec);
7228
7229
    return i-1;
7230
}
7231
7232
/*
7233
 * Initial approximation for the reciprocal:
7234
 *    k_0 := MPD_RDIGITS-2
7235
 *    z_0 := 10**(-k_0) * floor(10**(2*k_0 + 2) / floor(v * 10**(k_0 + 2)))
7236
 * Absolute error:
7237
 *    |1/v - z_0| < 10**(-k_0)
7238
 * ACL2 proof: maxerror-inverse-approx
7239
 */
7240
static void
7241
_mpd_qreciprocal_approx(mpd_t *z, const mpd_t *v, uint32_t *status)
7242
{
7243
    mpd_uint_t p10data[2] = {0, mpd_pow10[MPD_RDIGITS-2]};
7244
    mpd_uint_t dummy, word;
7245
    int n;
7246
7247
    assert(v->exp == -v->digits);
7248
7249
    _mpd_get_msdigits(&dummy, &word, v, MPD_RDIGITS);
inline
    
_mpd_get_msdigits can be inlined into _mpd_qreciprocal_approx with cost=140 (threshold=325) 
_mpd_qreciprocal_approx
inline
    
_mpd_get_msdigits inlined into _mpd_qreciprocal_approx 
_mpd_qreciprocal_approx
7250
    n = mpd_word_digits(word);
inline
        
mpd_word_digits should always be inlined (cost=always) 
_mpd_qreciprocal_approx
inline
        
mpd_word_digits inlined into _mpd_qreciprocal_approx 
_mpd_qreciprocal_approx
7251
    word *= mpd_pow10[MPD_RDIGITS-n];
7252
7253
    mpd_qresize(z, 2, status);
inline
    
mpd_qresize should always be inlined (cost=always) 
_mpd_qreciprocal_approx
inline
    
mpd_qresize inlined into _mpd_qreciprocal_approx 
_mpd_qreciprocal_approx
7254
    (void)_mpd_shortdiv(z->data, p10data, 2, word);
inline
          
_mpd_shortdiv will not be inlined into _mpd_qreciprocal_approx because its definition is unavailable 
_mpd_qreciprocal_approx
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
_mpd_qreciprocal_approx
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
_mpd_qreciprocal
gvn
                           
load of type i64* not eliminated because it is clobbered by call 
_mpd_base_ndivmod
7255
7256
    mpd_clear_flags(z);
inline
    
mpd_clear_flags should always be inlined (cost=always) 
_mpd_qreciprocal_approx
inline
    
mpd_clear_flags inlined into _mpd_qreciprocal_approx 
_mpd_qreciprocal_approx
7257
    z->exp = -(MPD_RDIGITS-2);
7258
    z->len = (z->data[1] == 0) ? 1 : 2;
gvn
                 
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_qreciprocal_approx
gvn
              
load of type i64 not eliminated because it is clobbered by store 
_mpd_qreciprocal_approx
gvn
                 
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_qreciprocal
gvn
              
load of type i64 not eliminated because it is clobbered by store 
_mpd_qreciprocal
gvn
                 
load of type i64* not eliminated in favor of load because it is clobbered by call 
_mpd_base_ndivmod
gvn
              
load of type i64 not eliminated because it is clobbered by store 
_mpd_base_ndivmod
7259
    mpd_setdigits(z);
inline
    
mpd_setdigits can be inlined into _mpd_qreciprocal_approx with cost=295 (threshold=325) 
_mpd_qreciprocal_approx
inline
    
mpd_setdigits inlined into _mpd_qreciprocal_approx 
_mpd_qreciprocal_approx
7260
}
7261
7262
/*
7263
 * Reciprocal, calculated with Newton's Method. Assumption: result != a.
7264
 * NOTE: The comments in the function show that certain operations are
7265
 * exact. The proof for the maximum error is too long to fit in here.
7266
 * ACL2 proof: maxerror-inverse-complete
7267
 */
7268
static void
7269
_mpd_qreciprocal(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7270
                 uint32_t *status)
7271
{
7272
    mpd_context_t varcontext, maxcontext;
7273
    mpd_t *z = result;         /* current approximation */
7274
    mpd_t *v;                  /* a, normalized to a number between 0.1 and 1 */
7275
    MPD_NEW_SHARED(vtmp, a);   /* v shares data with a */
gvn
    
load of type i8 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
7276
    MPD_NEW_STATIC(s,0,0,0,0); /* temporary variable */
7277
    MPD_NEW_STATIC(t,0,0,0,0); /* temporary variable */
7278
    MPD_NEW_CONST(two,0,0,1,1,1,2); /* const 2 */
7279
    mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
7280
    mpd_ssize_t adj, maxprec, initprec;
7281
    uint8_t sign = mpd_sign(a);
inline
                   
mpd_sign should always be inlined (cost=always) 
_mpd_qreciprocal
inline
                   
mpd_sign inlined into _mpd_qreciprocal 
_mpd_qreciprocal
7282
    int i;
7283
7284
    assert(result != a);
7285
7286
    v = &vtmp;
7287
    mpd_clear_flags(v);
inline
    
mpd_clear_flags should always be inlined (cost=always) 
_mpd_qreciprocal
inline
    
mpd_clear_flags inlined into _mpd_qreciprocal 
_mpd_qreciprocal
7288
    adj = v->digits + v->exp;
gvn
             
load of type i64 eliminated in favor of load 
_mpd_qreciprocal
gvn
                         
load of type i64 eliminated in favor of load 
_mpd_qreciprocal
7289
    v->exp = -v->digits;
7290
7291
    /* Initial approximation */
7292
    _mpd_qreciprocal_approx(z, v, status);
inline
    
_mpd_qreciprocal_approx can be inlined into _mpd_qreciprocal with cost=-14035 (threshold=250) 
_mpd_qreciprocal
inline
    
_mpd_qreciprocal_approx inlined into _mpd_qreciprocal 
_mpd_qreciprocal
7293
7294
    mpd_maxcontext(&varcontext);
inline
    
mpd_maxcontext will not be inlined into _mpd_qreciprocal because its definition is unavailable 
_mpd_qreciprocal
7295
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into _mpd_qreciprocal because its definition is unavailable 
_mpd_qreciprocal
7296
    varcontext.round = maxcontext.round = MPD_ROUND_TRUNC;
7297
    varcontext.emax = maxcontext.emax = MPD_MAX_EMAX + 100;
7298
    varcontext.emin = maxcontext.emin = MPD_MIN_EMIN - 100;
7299
    maxcontext.prec = MPD_MAX_PREC + 100;
7300
7301
    maxprec = ctx->prec;
gvn
                   
load of type i64 not eliminated because it is clobbered by call 
_mpd_qreciprocal
gvn
                   
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_base_ndivmod
7302
    maxprec += 2;
7303
    initprec = MPD_RDIGITS-3;
7304
7305
    i = recpr_schedule_prec(klist, maxprec, initprec);
inline
        
recpr_schedule_prec can be inlined into _mpd_qreciprocal with cost=-14990 (threshold=325) 
_mpd_qreciprocal
inline
        
recpr_schedule_prec inlined into _mpd_qreciprocal 
_mpd_qreciprocal
7306
    for (; i >= 0; i--) {
loop-vectorize
    
loop not vectorized 
_mpd_base_ndivmod
7307
         /* Loop invariant: z->digits <= klist[i]+7 */
7308
         /* Let s := z**2, exact result */
7309
        _mpd_qmul_exact(&s, z, z, &maxcontext, status);
inline
        
_mpd_qmul_exact too costly to inline (cost=265, threshold=250) 
_mpd_qreciprocal
inline
        
_mpd_qmul_exact will not be inlined into _mpd_qreciprocal 
_mpd_qreciprocal
inline
        
_mpd_qmul_exact can be inlined into _mpd_base_ndivmod with cost=245 (threshold=250) 
_mpd_base_ndivmod
inline
        
_mpd_qmul_exact inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7310
        varcontext.prec = 2*klist[i] + 5;
licm
                   
hosting getelementptr 
_mpd_qreciprocal
7311
        if (v->digits > varcontext.prec) {
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qreciprocal
gvn
               
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qreciprocal
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
               
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_base_ndivmod
7312
            /* Let t := v, truncated to n >= 2*k+5 fraction digits */
7313
            mpd_qshiftr(&t, v, v->digits-varcontext.prec, status);
inline
            
mpd_qshiftr too costly to inline (cost=630, threshold=625) 
_mpd_qreciprocal
inline
            
mpd_qshiftr will not be inlined into _mpd_qreciprocal 
_mpd_qreciprocal
inline
            
mpd_qshiftr too costly to inline (cost=630, threshold=625) 
_mpd_base_ndivmod
inline
            
mpd_qshiftr will not be inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7314
            t.exp = -varcontext.prec;
licm
                                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qreciprocal
gvn
                                
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qreciprocal
licm
                                
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_base_ndivmod
gvn
                                
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_base_ndivmod
7315
            /* Let t := trunc(v)*s, truncated to n >= 2*k+1 fraction digits */
7316
            mpd_qmul(&t, &t, &s, &varcontext, status);
inline
            
mpd_qmul can be inlined into _mpd_qreciprocal with cost=45 (threshold=375) 
_mpd_qreciprocal
inline
            
mpd_qmul inlined into _mpd_qreciprocal 
_mpd_qreciprocal
7317
        }
7318
        else { /* v->digits <= 2*k+5 */
7319
            /* Let t := v*s, truncated to n >= 2*k+1 fraction digits */
7320
            mpd_qmul(&t, v, &s, &varcontext, status);
inline
            
mpd_qmul can be inlined into _mpd_qreciprocal with cost=45 (threshold=375) 
_mpd_qreciprocal
inline
            
mpd_qmul inlined into _mpd_qreciprocal 
_mpd_qreciprocal
7321
        }
7322
        /* Let s := 2*z, exact result */
7323
        _mpd_qmul_exact(&s, z, &two, &maxcontext, status);
inline
        
_mpd_qmul_exact too costly to inline (cost=265, threshold=250) 
_mpd_qreciprocal
inline
        
_mpd_qmul_exact will not be inlined into _mpd_qreciprocal 
_mpd_qreciprocal
inline
        
_mpd_qmul_exact can be inlined into _mpd_base_ndivmod with cost=245 (threshold=250) 
_mpd_base_ndivmod
inline
        
_mpd_qmul_exact inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7324
        /* s.digits < t.digits <= 2*k+5, |adjexp(s)-adjexp(t)| <= 1,
7325
         * so the subtraction generates at most 2*k+6 <= klist[i+1]+7
7326
         * digits. The loop invariant is preserved. */
7327
        _mpd_qsub_exact(z, &s, &t, &maxcontext, status);
inline
        
_mpd_qsub_exact can be inlined into _mpd_qreciprocal with cost=220 (threshold=250) 
_mpd_qreciprocal
inline
        
_mpd_qsub_exact inlined into _mpd_qreciprocal 
_mpd_qreciprocal
7328
    }
7329
7330
    if (!mpd_isspecial(z)) {
inline
         
mpd_isspecial should always be inlined (cost=always) 
_mpd_qreciprocal
inline
         
mpd_isspecial inlined into _mpd_qreciprocal 
_mpd_qreciprocal
7331
        z->exp -= adj;
gvn
               
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qreciprocal
gvn
               
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_base_ndivmod
7332
        mpd_set_flags(z, sign);
inline
        
mpd_set_flags should always be inlined (cost=always) 
_mpd_qreciprocal
inline
        
mpd_set_flags inlined into _mpd_qreciprocal 
_mpd_qreciprocal
7333
    }
7334
7335
    mpd_del(&s);
inline
    
mpd_del should always be inlined (cost=always) 
_mpd_qreciprocal
inline
    
mpd_del inlined into _mpd_qreciprocal 
_mpd_qreciprocal
7336
    mpd_del(&t);
inline
    
mpd_del should always be inlined (cost=always) 
_mpd_qreciprocal
inline
    
mpd_del inlined into _mpd_qreciprocal 
_mpd_qreciprocal
7337
    mpd_qfinalize(z, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
_mpd_qreciprocal
inline
    
mpd_qfinalize will not be inlined into _mpd_qreciprocal 
_mpd_qreciprocal
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
_mpd_base_ndivmod
inline
    
mpd_qfinalize will not be inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7338
}
7339
7340
/*
7341
 * Internal function for large numbers:
7342
 *
7343
 *     q, r = divmod(coeff(a), coeff(b))
7344
 *
7345
 * Strategy: Multiply the dividend by the reciprocal of the divisor. The
7346
 * inexact result is fixed by a small loop, using at most one iteration.
7347
 *
7348
 * ACL2 proofs:
7349
 * ------------
7350
 *    1) q is a natural number.  (ndivmod-quotient-natp)
7351
 *    2) r is a natural number.  (ndivmod-remainder-natp)
7352
 *    3) a = q * b + r           (ndivmod-q*b+r==a)
7353
 *    4) r < b                   (ndivmod-remainder-<-b)
7354
 */
7355
static void
7356
_mpd_base_ndivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
7357
                  uint32_t *status)
7358
{
7359
    mpd_context_t workctx;
7360
    mpd_t *qq = q, *rr = r;
7361
    mpd_t aa, bb;
7362
    int k;
7363
7364
    _mpd_copy_shared(&aa, a);
inline
    
_mpd_copy_shared can be inlined into _mpd_base_ndivmod with cost=0 (threshold=487) 
_mpd_base_ndivmod
inline
    
_mpd_copy_shared inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7365
    _mpd_copy_shared(&bb, b);
inline
    
_mpd_copy_shared can be inlined into _mpd_base_ndivmod with cost=-15000 (threshold=487) 
_mpd_base_ndivmod
inline
    
_mpd_copy_shared inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7366
7367
    mpd_set_positive(&aa);
inline
    
mpd_set_positive should always be inlined (cost=always) 
_mpd_base_ndivmod
inline
    
mpd_set_positive inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7368
    mpd_set_positive(&bb);
inline
    
mpd_set_positive should always be inlined (cost=always) 
_mpd_base_ndivmod
inline
    
mpd_set_positive inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7369
    aa.exp = 0;
7370
    bb.exp = 0;
7371
7372
    if (q == a || q == b) {
7373
        if ((qq = mpd_qnew()) == NULL) {
inline
                  
mpd_qnew will not be inlined into _mpd_base_ndivmod because its definition is unavailable 
_mpd_base_ndivmod
7374
            *status |= MPD_Malloc_error;
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
7375
            goto nanresult;
7376
        }
7377
    }
7378
    if (r == a || r == b) {
7379
        if ((rr = mpd_qnew()) == NULL) {
inline
                  
mpd_qnew will not be inlined into _mpd_base_ndivmod because its definition is unavailable 
_mpd_base_ndivmod
7380
            *status |= MPD_Malloc_error;
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
7381
            goto nanresult;
7382
        }
7383
    }
7384
7385
    mpd_maxcontext(&workctx);
inline
    
mpd_maxcontext will not be inlined into _mpd_base_ndivmod because its definition is unavailable 
_mpd_base_ndivmod
7386
7387
    /* Let prec := adigits - bdigits + 4 */
7388
    workctx.prec = a->digits - b->digits + 1 + 3;
gvn
                      
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_base_ndivmod
gvn
                                  
load of type i64 not eliminated in favor of load because it is clobbered by call 
_mpd_base_ndivmod
7389
    if (a->digits > MPD_MAX_PREC || workctx.prec > MPD_MAX_PREC) {
7390
        *status |= MPD_Division_impossible;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
7391
        goto nanresult;
7392
    }
7393
7394
    /* Let x := _mpd_qreciprocal(b, prec)
7395
     * Then x is bounded by:
7396
     *    1) 1/b - 10**(-prec - bdigits) < x < 1/b + 10**(-prec - bdigits)
7397
     *    2) 1/b - 10**(-adigits - 4) < x < 1/b + 10**(-adigits - 4)
7398
     */
7399
    _mpd_qreciprocal(rr, &bb, &workctx, &workctx.status);
inline
    
_mpd_qreciprocal can be inlined into _mpd_base_ndivmod with cost=-12735 (threshold=250) 
_mpd_base_ndivmod
inline
    
_mpd_qreciprocal inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7400
7401
    /* Get an estimate for the quotient. Let q := a * x
7402
     * Then q is bounded by:
7403
     *    3) a/b - 10**-4 < q < a/b + 10**-4
7404
     */
7405
    _mpd_qmul(qq, &aa, rr, &workctx, &workctx.status);
inline
    
_mpd_qmul too costly to inline (cost=815, threshold=812) 
_mpd_base_ndivmod
inline
    
_mpd_qmul will not be inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7406
    /* Truncate q to an integer:
7407
     *    4) a/b - 2 < trunc(q) < a/b + 1
7408
     */
7409
    mpd_qtrunc(qq, qq, &workctx, &workctx.status);
inline
    
mpd_qtrunc can be inlined into _mpd_base_ndivmod with cost=5 (threshold=375) 
_mpd_base_ndivmod
inline
    
mpd_qtrunc inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7410
7411
    workctx.prec = aa.digits + 3;
gvn
                      
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_base_ndivmod
7412
    workctx.emax = MPD_MAX_EMAX + 3;
7413
    workctx.emin = MPD_MIN_EMIN - 3;
7414
    /* Multiply the estimate for q by b:
7415
     *    5) a - 2 * b < trunc(q) * b < a + b
7416
     */
7417
    _mpd_qmul(rr, &bb, qq, &workctx, &workctx.status);
inline
    
_mpd_qmul too costly to inline (cost=815, threshold=812) 
_mpd_base_ndivmod
inline
    
_mpd_qmul will not be inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7418
    /* Get the estimate for r such that a = q * b + r. */
7419
    _mpd_qsub_exact(rr, &aa, rr, &workctx, &workctx.status);
inline
    
_mpd_qsub_exact can be inlined into _mpd_base_ndivmod with cost=220 (threshold=250) 
_mpd_base_ndivmod
inline
    
_mpd_qsub_exact inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7420
7421
    /* Fix the result. At this point -b < r < 2*b, so the correction loop
7422
       takes at most one iteration. */
7423
    for (k = 0;; k++) {
loop-vectorize
    
loop not vectorized: vectorization is not beneficial and is not explicitly forced 
_mpd_base_ndivmod
7424
        if (mpd_isspecial(qq) || mpd_isspecial(rr)) {
inline
            
mpd_isspecial should always be inlined (cost=always) 
_mpd_base_ndivmod
inline
            
mpd_isspecial inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
inline
                                 
mpd_isspecial should always be inlined (cost=always) 
_mpd_base_ndivmod
inline
                                 
mpd_isspecial inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7425
            *status |= (workctx.status&MPD_Errors);
gvn
                                
load of type i32 eliminated in favor of phi 
_mpd_base_ndivmod
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
7426
            goto nanresult;
7427
        }
7428
        if (k > 2) { /* Allow two iterations despite the proof. */
7429
            mpd_err_warn("libmpdec: internal error in "       /* GCOV_NOT_REACHED */
inline
            
fprintf will not be inlined into _mpd_base_ndivmod because its definition is unavailable 
_mpd_base_ndivmod
gvn
            
load of type %struct._IO_FILE* not eliminated because it is clobbered by call 
_mpd_base_ndivmod
7430
                         "_mpd_base_ndivmod: please report"); /* GCOV_NOT_REACHED */
7431
            *status |= MPD_Invalid_operation;                 /* GCOV_NOT_REACHED */
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
7432
            goto nanresult;                                   /* GCOV_NOT_REACHED */
7433
        }
7434
        /* r < 0 */
7435
        else if (_mpd_cmp(&zero, rr) == 1) {
inline
                 
_mpd_cmp too costly to inline (cost=550, threshold=250) 
_mpd_base_ndivmod
inline
                 
_mpd_cmp will not be inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7436
            _mpd_qadd_exact(rr, rr, &bb, &workctx, &workctx.status);
inline
            
_mpd_qadd_exact can be inlined into _mpd_base_ndivmod with cost=220 (threshold=250) 
_mpd_base_ndivmod
inline
            
_mpd_qadd_exact inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7437
            _mpd_qadd_exact(qq, qq, &minus_one, &workctx, &workctx.status);
inline
            
_mpd_qadd_exact can be inlined into _mpd_base_ndivmod with cost=220 (threshold=250) 
_mpd_base_ndivmod
inline
            
_mpd_qadd_exact inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7438
        }
7439
        /* 0 <= r < b */
7440
        else if (_mpd_cmp(rr, &bb) == -1) {
inline
                 
_mpd_cmp too costly to inline (cost=525, threshold=250) 
_mpd_base_ndivmod
inline
                 
_mpd_cmp will not be inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7441
            break;
7442
        }
7443
        /* r >= b */
7444
        else {
7445
            _mpd_qsub_exact(rr, rr, &bb, &workctx, &workctx.status);
inline
            
_mpd_qsub_exact can be inlined into _mpd_base_ndivmod with cost=-14780 (threshold=250) 
_mpd_base_ndivmod
inline
            
_mpd_qsub_exact inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7446
            _mpd_qadd_exact(qq, qq, &one, &workctx, &workctx.status);
inline
            
_mpd_qadd_exact can be inlined into _mpd_base_ndivmod with cost=220 (threshold=250) 
_mpd_base_ndivmod
inline
            
_mpd_qadd_exact inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7447
        }
7448
    }
7449
7450
    if (qq != q) {
7451
        if (!mpd_qcopy(q, qq, status)) {
inline
             
mpd_qcopy can be inlined into _mpd_base_ndivmod with cost=215 (threshold=250) 
_mpd_base_ndivmod
inline
             
mpd_qcopy inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7452
            goto nanresult; /* GCOV_UNLIKELY */
7453
        }
7454
        mpd_del(qq);
inline
        
mpd_del should always be inlined (cost=always) 
_mpd_base_ndivmod
inline
        
mpd_del inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7455
    }
7456
    if (rr != r) {
7457
        if (!mpd_qcopy(r, rr, status)) {
inline
             
mpd_qcopy can be inlined into _mpd_base_ndivmod with cost=215 (threshold=250) 
_mpd_base_ndivmod
inline
             
mpd_qcopy inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7458
            goto nanresult; /* GCOV_UNLIKELY */
7459
        }
7460
        mpd_del(rr);
inline
        
mpd_del should always be inlined (cost=always) 
_mpd_base_ndivmod
inline
        
mpd_del inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7461
    }
7462
7463
    *status |= (workctx.status&MPD_Errors);
gvn
                        
load of type i32 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_base_ndivmod
7464
    return;
7465
7466
7467
nanresult:
7468
    if (qq && qq != q) mpd_del(qq);
inline
                       
mpd_del should always be inlined (cost=always) 
_mpd_base_ndivmod
inline
                       
mpd_del inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7469
    if (rr && rr != r) mpd_del(rr);
inline
                       
mpd_del should always be inlined (cost=always) 
_mpd_base_ndivmod
inline
                       
mpd_del inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7470
    mpd_setspecial(q, MPD_POS, MPD_NAN);
inline
    
mpd_setspecial can be inlined into _mpd_base_ndivmod with cost=115 (threshold=250) 
_mpd_base_ndivmod
inline
    
mpd_setspecial inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7471
    mpd_setspecial(r, MPD_POS, MPD_NAN);
inline
    
mpd_setspecial can be inlined into _mpd_base_ndivmod with cost=115 (threshold=250) 
_mpd_base_ndivmod
inline
    
mpd_setspecial inlined into _mpd_base_ndivmod 
_mpd_base_ndivmod
7472
}
7473
7474
/* LIBMPDEC_ONLY */
7475
/*
7476
 * Schedule the optimal precision increase for the Newton iteration.
7477
 *   v := input operand
7478
 *   z_0 := initial approximation
7479
 *   initprec := natural number such that abs(sqrt(v) - z_0) < 10**-initprec
7480
 *   maxprec := target precision
7481
 *
7482
 * For convenience the output klist contains the elements in reverse order:
7483
 *   klist := [k_n-1, ..., k_0], where
7484
 *     1) k_0 <= initprec and
7485
 *     2) abs(sqrt(v) - result) < 10**(-2*k_n-1 + 2) <= 10**-maxprec.
7486
 */
7487
static inline int
7488
invroot_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2],
7489
                      mpd_ssize_t maxprec, mpd_ssize_t initprec)
7490
{
7491
    mpd_ssize_t k;
7492
    int i;
7493
7494
    assert(maxprec >= 3 && initprec >= 3);
7495
    if (maxprec <= initprec) return -1;
7496
7497
    i = 0; k = maxprec;
7498
    do {
loop-vectorize
    
loop not vectorized: could not determine number of loop iterations 
mpd_qinvroot
loop-vectorize
    
loop not vectorized 
mpd_qinvroot
7499
        k = (k+3) / 2;
7500
        klist[i++] = k;
7501
    } while (k > initprec);
7502
7503
    return i-1;
7504
}
7505
7506
/*
7507
 * Initial approximation for the inverse square root function.
7508
 *   Input:
7509
 *     v := rational number, with 1 <= v < 100
7510
 *     vhat := floor(v * 10**6)
7511
 *   Output:
7512
 *     z := approximation to 1/sqrt(v), such that abs(z - 1/sqrt(v)) < 10**-3.
7513
 */
7514
static inline void
7515
_invroot_init_approx(mpd_t *z, mpd_uint_t vhat)
7516
{
7517
    mpd_uint_t lo = 1000;
7518
    mpd_uint_t hi = 10000;
7519
    mpd_uint_t a, sq;
7520
7521
    assert(lo*lo <= vhat && vhat < (hi+1)*(hi+1));
7522
7523
    for(;;) {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qinvroot
loop-vectorize
    
loop not vectorized 
mpd_qinvroot
7524
        a = (lo + hi) / 2;
7525
        sq = a * a;
7526
        if (vhat >= sq) {
7527
            if (vhat < sq + 2*a + 1) {
7528
                break;
7529
            }
7530
            lo = a + 1;
7531
        }
7532
        else {
7533
            hi = a - 1;
7534
        }
7535
    }
7536
7537
    /*
7538
     * After the binary search we have:
7539
     *  1) a**2 <= floor(v * 10**6) < (a + 1)**2
7540
     * This implies:
7541
     *  2) a**2 <= v * 10**6 < (a + 1)**2
7542
     *  3) a <= sqrt(v) * 10**3 < a + 1
7543
     * Since 10**3 <= a:
7544
     *  4) 0 <= 10**prec/a - 1/sqrt(v) < 10**-prec
7545
     * We have:
7546
     *  5) 10**3/a - 10**-3 < floor(10**9/a) * 10**-6 <= 10**3/a
7547
     * Merging 4) and 5):
7548
     *  6) abs(floor(10**9/a) * 10**-6 - 1/sqrt(v)) < 10**-3
7549
     */
7550
    mpd_minalloc(z);
inline
    
mpd_minalloc should always be inlined (cost=always) 
_invroot_init_approx
inline
    
mpd_minalloc inlined into _invroot_init_approx 
_invroot_init_approx
7551
    mpd_clear_flags(z);
inline
    
mpd_clear_flags should always be inlined (cost=always) 
_invroot_init_approx
inline
    
mpd_clear_flags inlined into _invroot_init_approx 
_invroot_init_approx
7552
    z->data[0] = 1000000000UL / a;
gvn
       
load of type i64* not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
       
load of type i64* not eliminated because it is clobbered by call 
mpd_qinvroot
7553
    z->len = 1;
7554
    z->exp = -6;
7555
    mpd_setdigits(z);
inline
    
mpd_setdigits can be inlined into _invroot_init_approx with cost=295 (threshold=325) 
_invroot_init_approx
inline
    
mpd_setdigits inlined into _invroot_init_approx 
_invroot_init_approx
7556
}
7557
7558
/*
7559
 * Set 'result' to 1/sqrt(a).
7560
 *   Relative error: abs(result - 1/sqrt(a)) < 10**-prec * 1/sqrt(a)
7561
 */
7562
static void
7563
_mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7564
              uint32_t *status)
7565
{
7566
    uint32_t workstatus = 0;
7567
    mpd_context_t varcontext, maxcontext;
7568
    mpd_t *z = result;         /* current approximation */
7569
    mpd_t *v;                  /* a, normalized to a number between 1 and 100 */
7570
    MPD_NEW_SHARED(vtmp, a);   /* by default v will share data with a */
gvn
    
load of type i8 eliminated in favor of load 
mpd_qinvroot
7571
    MPD_NEW_STATIC(s,0,0,0,0); /* temporary variable */
7572
    MPD_NEW_STATIC(t,0,0,0,0); /* temporary variable */
7573
    MPD_NEW_CONST(one_half,0,-1,1,1,1,5);
7574
    MPD_NEW_CONST(three,0,0,1,1,1,3);
7575
    mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
7576
    mpd_ssize_t ideal_exp, shift;
7577
    mpd_ssize_t adj, tz;
7578
    mpd_ssize_t maxprec, fracdigits;
7579
    mpd_uint_t vhat, dummy;
7580
    int i, n;
7581
7582
7583
    ideal_exp = -(a->exp - (a->exp & 1)) / 2;
gvn
                     
load of type i64 eliminated in favor of load 
_mpd_qinvroot
7584
7585
    v = &vtmp;
7586
    if (result == a) {
7587
        if ((v = mpd_qncopy(a)) == NULL) {
inline
                 
mpd_qncopy can be inlined into _mpd_qinvroot with cost=100 (threshold=250) 
_mpd_qinvroot
inline
                 
mpd_qncopy inlined into _mpd_qinvroot 
_mpd_qinvroot
7588
            mpd_seterror(result, MPD_Malloc_error, status);
inline
            
mpd_seterror can be inlined into _mpd_qinvroot with cost=130 (threshold=250) 
_mpd_qinvroot
inline
            
mpd_seterror inlined into _mpd_qinvroot 
_mpd_qinvroot
7589
            return;
7590
        }
7591
    }
7592
7593
    /* normalize a to 1 <= v < 100 */
7594
    if ((v->digits+v->exp) & 1) {
gvn
            
load of type i64 eliminated in favor of phi 
_mpd_qinvroot
gvn
                      
load of type i64 eliminated in favor of phi 
_mpd_qinvroot
7595
        fracdigits = v->digits - 1;
7596
        v->exp = -fracdigits;
7597
        n = (v->digits > 7) ? 7 : (int)v->digits;
7598
        /* Let vhat := floor(v * 10**(2*initprec)) */
7599
        _mpd_get_msdigits(&dummy, &vhat, v, n);
inline
        
_mpd_get_msdigits can be inlined into _mpd_qinvroot with cost=145 (threshold=325) 
_mpd_qinvroot
inline
        
_mpd_get_msdigits inlined into _mpd_qinvroot 
_mpd_qinvroot
7600
        if (n < 7) {
7601
            vhat *= mpd_pow10[7-n];
7602
        }
7603
    }
7604
    else {
7605
        fracdigits = v->digits - 2;
7606
        v->exp = -fracdigits;
7607
        n = (v->digits > 8) ? 8 : (int)v->digits;
7608
        /* Let vhat := floor(v * 10**(2*initprec)) */
7609
        _mpd_get_msdigits(&dummy, &vhat, v, n);
inline
        
_mpd_get_msdigits can be inlined into _mpd_qinvroot with cost=-14855 (threshold=325) 
_mpd_qinvroot
inline
        
_mpd_get_msdigits inlined into _mpd_qinvroot 
_mpd_qinvroot
7610
        if (n < 8) {
7611
            vhat *= mpd_pow10[8-n];
7612
        }
7613
    }
7614
    adj = (a->exp-v->exp) / 2;
gvn
              
load of type i64 not eliminated in favor of load because it is clobbered by store 
_mpd_qinvroot
gvn
                     
load of type i64 eliminated in favor of phi 
_mpd_qinvroot
gvn
              
load of type i64 not eliminated in favor of load because it is clobbered by store 
mpd_qinvroot
7615
7616
    /* initial approximation */
7617
    _invroot_init_approx(z, vhat);
inline
    
_invroot_init_approx can be inlined into _mpd_qinvroot with cost=-14485 (threshold=325) 
_mpd_qinvroot
inline
    
_invroot_init_approx inlined into _mpd_qinvroot 
_mpd_qinvroot
7618
7619
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into _mpd_qinvroot because its definition is unavailable 
_mpd_qinvroot
7620
    mpd_maxcontext(&varcontext);
inline
    
mpd_maxcontext will not be inlined into _mpd_qinvroot because its definition is unavailable 
_mpd_qinvroot
7621
    varcontext.round = MPD_ROUND_TRUNC;
7622
    maxprec = ctx->prec + 1;
gvn
                   
load of type i64 not eliminated because it is clobbered by call 
_mpd_qinvroot
7623
7624
    /* initprec == 3 */
7625
    i = invroot_schedule_prec(klist, maxprec, 3);
inline
        
invroot_schedule_prec can be inlined into _mpd_qinvroot with cost=-14990 (threshold=325) 
_mpd_qinvroot
inline
        
invroot_schedule_prec inlined into _mpd_qinvroot 
_mpd_qinvroot
7626
    for (; i >= 0; i--) {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qinvroot
loop-vectorize
    
loop not vectorized 
mpd_qinvroot
7627
        varcontext.prec = 2*klist[i]+2;
licm
                   
hosting getelementptr 
_mpd_qinvroot
gvn
                            
load eliminated by PRE 
mpd_qinvroot
7628
        mpd_qmul(&s, z, z, &maxcontext, &workstatus);
inline
        
mpd_qmul can be inlined into _mpd_qinvroot with cost=45 (threshold=375) 
_mpd_qinvroot
inline
        
mpd_qmul inlined into _mpd_qinvroot 
_mpd_qinvroot
7629
        if (v->digits > varcontext.prec) {
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qinvroot
licm
                                   
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qinvroot
gvn
               
load of type i64 not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
                                   
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qinvroot
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qinvroot
licm
                                   
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qinvroot
gvn
               
load of type i64 not eliminated because it is clobbered by call 
mpd_qinvroot
gvn
                                   
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qinvroot
7630
            shift = v->digits - varcontext.prec;
7631
            mpd_qshiftr(&t, v, shift, &workstatus);
inline
            
mpd_qshiftr too costly to inline (cost=630, threshold=625) 
_mpd_qinvroot
inline
            
mpd_qshiftr will not be inlined into _mpd_qinvroot 
_mpd_qinvroot
inline
            
mpd_qshiftr too costly to inline (cost=630, threshold=625) 
mpd_qinvroot
inline
            
mpd_qshiftr will not be inlined into mpd_qinvroot 
mpd_qinvroot
7632
            t.exp += shift;
licm
                  
failed to move load with loop-invariant address because the loop may invalidate its value 
_mpd_qinvroot
gvn
                  
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qinvroot
licm
                  
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qinvroot
gvn
                  
load of type i64 not eliminated because it is clobbered by call 
mpd_qinvroot
7633
            mpd_qmul(&t, &t, &s, &varcontext, &workstatus);
inline
            
mpd_qmul can be inlined into _mpd_qinvroot with cost=45 (threshold=375) 
_mpd_qinvroot
inline
            
mpd_qmul inlined into _mpd_qinvroot 
_mpd_qinvroot
7634
        }
7635
        else {
7636
            mpd_qmul(&t, v, &s, &varcontext, &workstatus);
inline
            
mpd_qmul can be inlined into _mpd_qinvroot with cost=45 (threshold=375) 
_mpd_qinvroot
inline
            
mpd_qmul inlined into _mpd_qinvroot 
_mpd_qinvroot
7637
        }
7638
        mpd_qsub(&t, &three, &t, &maxcontext, &workstatus);
inline
        
mpd_qsub too costly to inline (cost=670, threshold=625) 
_mpd_qinvroot
inline
        
mpd_qsub will not be inlined into _mpd_qinvroot 
_mpd_qinvroot
inline
        
mpd_qsub too costly to inline (cost=670, threshold=625) 
mpd_qinvroot
inline
        
mpd_qsub will not be inlined into mpd_qinvroot 
mpd_qinvroot
7639
        mpd_qmul(z, z, &t, &varcontext, &workstatus);
inline
        
mpd_qmul can be inlined into _mpd_qinvroot with cost=45 (threshold=375) 
_mpd_qinvroot
inline
        
mpd_qmul inlined into _mpd_qinvroot 
_mpd_qinvroot
7640
        mpd_qmul(z, z, &one_half, &maxcontext, &workstatus);
inline
        
mpd_qmul can be inlined into _mpd_qinvroot with cost=45 (threshold=375) 
_mpd_qinvroot
inline
        
mpd_qmul inlined into _mpd_qinvroot 
_mpd_qinvroot
7641
    }
7642
7643
    z->exp -= adj;
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qinvroot
gvn
           
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qinvroot
7644
7645
    tz = mpd_trail_zeros(result);
inline
         
mpd_trail_zeros can be inlined into _mpd_qinvroot with cost=65 (threshold=250) 
_mpd_qinvroot
inline
         
mpd_trail_zeros inlined into _mpd_qinvroot 
_mpd_qinvroot
7646
    shift = ideal_exp - result->exp;
gvn
                                
load of type i64 eliminated in favor of sub 
_mpd_qinvroot
7647
    shift = (tz > shift) ? shift : tz;
7648
    if (shift > 0) {
7649
        mpd_qshiftr_inplace(result, shift);
inline
        
mpd_qshiftr_inplace too costly to inline (cost=475, threshold=250) 
_mpd_qinvroot
inline
        
mpd_qshiftr_inplace will not be inlined into _mpd_qinvroot 
_mpd_qinvroot
inline
        
mpd_qshiftr_inplace too costly to inline (cost=475, threshold=250) 
mpd_qinvroot
inline
        
mpd_qshiftr_inplace will not be inlined into mpd_qinvroot 
mpd_qinvroot
7650
        result->exp += shift;
gvn
                    
load of type i64 not eliminated in favor of store because it is clobbered by call 
_mpd_qinvroot
gvn
                    
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qinvroot
7651
    }
7652
7653
7654
    mpd_del(&s);
inline
    
mpd_del should always be inlined (cost=always) 
_mpd_qinvroot
inline
    
mpd_del inlined into _mpd_qinvroot 
_mpd_qinvroot
7655
    mpd_del(&t);
inline
    
mpd_del should always be inlined (cost=always) 
_mpd_qinvroot
inline
    
mpd_del inlined into _mpd_qinvroot 
_mpd_qinvroot
7656
    if (v != &vtmp) mpd_del(v);
inline
                    
mpd_del should always be inlined (cost=always) 
_mpd_qinvroot
inline
                    
mpd_del inlined into _mpd_qinvroot 
_mpd_qinvroot
7657
    *status |= (workstatus&MPD_Errors);
gvn
                
load of type i32 not eliminated in favor of store because it is clobbered by call 
_mpd_qinvroot
gvn
            
load of type i32 not eliminated because it is clobbered by call 
_mpd_qinvroot
gvn
                
load of type i32 not eliminated in favor of store because it is clobbered by call 
mpd_qinvroot
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qinvroot
7658
    *status |= (MPD_Rounded|MPD_Inexact);
7659
}
7660
7661
void
7662
mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7663
             uint32_t *status)
7664
{
7665
    mpd_context_t workctx;
7666
7667
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qinvroot
inline
        
mpd_isspecial inlined into mpd_qinvroot 
mpd_qinvroot
7668
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qinvroot
inline
            
mpd_qcheck_nan will not be inlined into mpd_qinvroot 
mpd_qinvroot
7669
            return;
7670
        }
7671
        if (mpd_isnegative(a)) {
inline
            
mpd_isnegative should always be inlined (cost=always) 
mpd_qinvroot
inline
            
mpd_isnegative inlined into mpd_qinvroot 
mpd_qinvroot
7672
            mpd_seterror(result, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into mpd_qinvroot with cost=130 (threshold=250) 
mpd_qinvroot
inline
            
mpd_seterror inlined into mpd_qinvroot 
mpd_qinvroot
7673
            return;
7674
        }
7675
        /* positive infinity */
7676
        _settriple(result, MPD_POS, 0, mpd_etiny(ctx));
inline
                                       
mpd_etiny should always be inlined (cost=always) 
mpd_qinvroot
inline
                                       
mpd_etiny inlined into mpd_qinvroot 
mpd_qinvroot
inline
        
_settriple can be inlined into mpd_qinvroot with cost=180 (threshold=250) 
mpd_qinvroot
inline
        
_settriple inlined into mpd_qinvroot 
mpd_qinvroot
7677
        *status |= MPD_Clamped;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qinvroot
7678
        return;
7679
    }
7680
    if (mpd_iszero(a)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
mpd_qinvroot
inline
        
mpd_iszero inlined into mpd_qinvroot 
mpd_qinvroot
7681
        mpd_setspecial(result, mpd_sign(a), MPD_INF);
inline
                               
mpd_sign should always be inlined (cost=always) 
mpd_qinvroot
inline
                               
mpd_sign inlined into mpd_qinvroot 
mpd_qinvroot
inline
        
mpd_setspecial can be inlined into mpd_qinvroot with cost=120 (threshold=250) 
mpd_qinvroot
inline
        
mpd_setspecial inlined into mpd_qinvroot 
mpd_qinvroot
7682
        *status |= MPD_Division_by_zero;
gvn
                
load of type i32 not eliminated because it is clobbered by call 
mpd_qinvroot
7683
        return;
7684
    }
7685
    if (mpd_isnegative(a)) {
inline
        
mpd_isnegative should always be inlined (cost=always) 
mpd_qinvroot
inline
        
mpd_isnegative inlined into mpd_qinvroot 
mpd_qinvroot
7686
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qinvroot with cost=130 (threshold=250) 
mpd_qinvroot
inline
        
mpd_seterror inlined into mpd_qinvroot 
mpd_qinvroot
7687
        return;
7688
    }
7689
7690
    workctx = *ctx;
7691
    workctx.prec += 2;
7692
    workctx.round = MPD_ROUND_HALF_EVEN;
7693
    _mpd_qinvroot(result, a, &workctx, status);
inline
    
_mpd_qinvroot can be inlined into mpd_qinvroot with cost=-12170 (threshold=250) 
mpd_qinvroot
inline
    
_mpd_qinvroot inlined into mpd_qinvroot 
mpd_qinvroot
7694
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qinvroot
inline
    
mpd_qfinalize will not be inlined into mpd_qinvroot 
mpd_qinvroot
7695
}
7696
/* END LIBMPDEC_ONLY */
7697
7698
/* Algorithm from decimal.py */
7699
void
7700
mpd_qsqrt(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7701
          uint32_t *status)
7702
{
7703
    mpd_context_t maxcontext;
7704
    MPD_NEW_STATIC(c,0,0,0,0);
7705
    MPD_NEW_STATIC(q,0,0,0,0);
7706
    MPD_NEW_STATIC(r,0,0,0,0);
7707
    MPD_NEW_CONST(two,0,0,1,1,1,2);
7708
    mpd_ssize_t prec, ideal_exp;
7709
    mpd_ssize_t l, shift;
7710
    int exact = 0;
7711
7712
7713
    ideal_exp = (a->exp - (a->exp & 1)) / 2;
7714
7715
    if (mpd_isspecial(a)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qsqrt
inline
        
mpd_isspecial inlined into mpd_qsqrt 
mpd_qsqrt
7716
        if (mpd_qcheck_nan(result, a, ctx, status)) {
inline
            
mpd_qcheck_nan too costly to inline (cost=335, threshold=250) 
mpd_qsqrt
inline
            
mpd_qcheck_nan will not be inlined into mpd_qsqrt 
mpd_qsqrt
7717
            return;
7718
        }
7719
        if (mpd_isnegative(a)) {
inline
            
mpd_isnegative should always be inlined (cost=always) 
mpd_qsqrt
inline
            
mpd_isnegative inlined into mpd_qsqrt 
mpd_qsqrt
7720
            mpd_seterror(result, MPD_Invalid_operation, status);
inline
            
mpd_seterror can be inlined into mpd_qsqrt with cost=130 (threshold=250) 
mpd_qsqrt
inline
            
mpd_seterror inlined into mpd_qsqrt 
mpd_qsqrt
7721
            return;
7722
        }
7723
        mpd_setspecial(result, MPD_POS, MPD_INF);
inline
        
mpd_setspecial can be inlined into mpd_qsqrt with cost=115 (threshold=250) 
mpd_qsqrt
inline
        
mpd_setspecial inlined into mpd_qsqrt 
mpd_qsqrt
7724
        return;
7725
    }
7726
    if (mpd_iszero(a)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
mpd_qsqrt
inline
        
mpd_iszero inlined into mpd_qsqrt 
mpd_qsqrt
7727
        _settriple(result, mpd_sign(a), 0, ideal_exp);
inline
                           
mpd_sign should always be inlined (cost=always) 
mpd_qsqrt
inline
                           
mpd_sign inlined into mpd_qsqrt 
mpd_qsqrt
inline
        
_settriple can be inlined into mpd_qsqrt with cost=-14820 (threshold=250) 
mpd_qsqrt
inline
        
_settriple inlined into mpd_qsqrt 
mpd_qsqrt
7728
        mpd_qfinalize(result, ctx, status);
inline
        
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qsqrt
inline
        
mpd_qfinalize will not be inlined into mpd_qsqrt 
mpd_qsqrt
7729
        return;
7730
    }
7731
    if (mpd_isnegative(a)) {
inline
        
mpd_isnegative should always be inlined (cost=always) 
mpd_qsqrt
inline
        
mpd_isnegative inlined into mpd_qsqrt 
mpd_qsqrt
7732
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qsqrt with cost=130 (threshold=250) 
mpd_qsqrt
inline
        
mpd_seterror inlined into mpd_qsqrt 
mpd_qsqrt
7733
        return;
7734
    }
7735
7736
    mpd_maxcontext(&maxcontext);
inline
    
mpd_maxcontext will not be inlined into mpd_qsqrt because its definition is unavailable 
mpd_qsqrt
7737
    prec = ctx->prec + 1;
gvn
                
load of type i64 not eliminated because it is clobbered by call 
mpd_qsqrt
7738
7739
    if (!mpd_qcopy(&c, a, status)) {
inline
         
mpd_qcopy can be inlined into mpd_qsqrt with cost=215 (threshold=250) 
mpd_qsqrt
inline
         
mpd_qcopy inlined into mpd_qsqrt 
mpd_qsqrt
7740
        goto malloc_error;
7741
    }
7742
    c.exp = 0;
7743
7744
    if (a->exp & 1) {
gvn
           
load of type i64 not eliminated in favor of load because it is clobbered by call 
mpd_qsqrt
7745
        if (!mpd_qshiftl(&c, &c, 1, status)) {
inline
             
mpd_qshiftl too costly to inline (cost=315, threshold=250) 
mpd_qsqrt
inline
             
mpd_qshiftl will not be inlined into mpd_qsqrt 
mpd_qsqrt
7746
            goto malloc_error;
7747
        }
7748
        l = (a->digits >> 1) + 1;
gvn
                
load of type i64 not eliminated because it is clobbered by call 
mpd_qsqrt
7749
    }
7750
    else {
7751
        l = (a->digits + 1) >> 1;
gvn
                
load of type i64 not eliminated because it is clobbered by call 
mpd_qsqrt
7752
    }
7753
7754
    shift = prec - l;
7755
    if (shift >= 0) {
7756
        if (!mpd_qshiftl(&c, &c, 2*shift, status)) {
inline
             
mpd_qshiftl too costly to inline (cost=320, threshold=250) 
mpd_qsqrt
inline
             
mpd_qshiftl will not be inlined into mpd_qsqrt 
mpd_qsqrt
7757
            goto malloc_error;
7758
        }
7759
        exact = 1;
7760
    }
7761
    else {
7762
        exact = !mpd_qshiftr_inplace(&c, -2*shift);
inline
                 
mpd_qshiftr_inplace too costly to inline (cost=475, threshold=250) 
mpd_qsqrt
inline
                 
mpd_qshiftr_inplace will not be inlined into mpd_qsqrt 
mpd_qsqrt
7763
    }
7764
7765
    ideal_exp -= shift;
7766
7767
    /* find result = floor(sqrt(c)) using Newton's method */
7768
    if (!mpd_qshiftl(result, &one, prec, status)) {
inline
         
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
mpd_qsqrt
inline
         
mpd_qshiftl will not be inlined into mpd_qsqrt 
mpd_qsqrt
7769
        goto malloc_error;
7770
    }
7771
7772
    while (1) {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qsqrt
loop-vectorize
    
loop not vectorized 
mpd_qsqrt
7773
        _mpd_qdivmod(&q, &r, &c, result, &maxcontext, &maxcontext.status);
inline
        
_mpd_qdivmod too costly to inline (cost=645, threshold=625) 
mpd_qsqrt
inline
        
_mpd_qdivmod will not be inlined into mpd_qsqrt 
mpd_qsqrt
7774
        if (mpd_isspecial(result) || mpd_isspecial(&q)) {
inline
            
mpd_isspecial should always be inlined (cost=always) 
mpd_qsqrt
inline
            
mpd_isspecial inlined into mpd_qsqrt 
mpd_qsqrt
inline
                                     
mpd_isspecial should always be inlined (cost=always) 
mpd_qsqrt
inline
                                     
mpd_isspecial inlined into mpd_qsqrt 
mpd_qsqrt
7775
            mpd_seterror(result, maxcontext.status&MPD_Errors, status);
inline
            
mpd_seterror can be inlined into mpd_qsqrt with cost=130 (threshold=250) 
mpd_qsqrt
inline
            
mpd_seterror inlined into mpd_qsqrt 
mpd_qsqrt
gvn
                                            
load of type i32 not eliminated because it is clobbered by call 
mpd_qsqrt
7776
            goto out;
7777
        }
7778
        if (_mpd_cmp(result, &q) <= 0) {
inline
            
_mpd_cmp too costly to inline (cost=525, threshold=250) 
mpd_qsqrt
inline
            
_mpd_cmp will not be inlined into mpd_qsqrt 
mpd_qsqrt
7779
            break;
7780
        }
7781
        _mpd_qadd_exact(result, result, &q, &maxcontext, &maxcontext.status);
inline
        
_mpd_qadd_exact can be inlined into mpd_qsqrt with cost=-14780 (threshold=250) 
mpd_qsqrt
inline
        
_mpd_qadd_exact inlined into mpd_qsqrt 
mpd_qsqrt
7782
        if (mpd_isspecial(result)) {
inline
            
mpd_isspecial should always be inlined (cost=always) 
mpd_qsqrt
inline
            
mpd_isspecial inlined into mpd_qsqrt 
mpd_qsqrt
7783
            mpd_seterror(result, maxcontext.status&MPD_Errors, status);
inline
            
mpd_seterror can be inlined into mpd_qsqrt with cost=130 (threshold=250) 
mpd_qsqrt
inline
            
mpd_seterror inlined into mpd_qsqrt 
mpd_qsqrt
gvn
                                            
load of type i32 eliminated in favor of phi 
mpd_qsqrt
7784
            goto out;
7785
        }
7786
        _mpd_qdivmod(result, &r, result, &two, &maxcontext, &maxcontext.status);
inline
        
_mpd_qdivmod too costly to inline (cost=630, threshold=625) 
mpd_qsqrt
inline
        
_mpd_qdivmod will not be inlined into mpd_qsqrt 
mpd_qsqrt
7787
    }
7788
7789
    if (exact) {
7790
        _mpd_qmul_exact(&r, result, result, &maxcontext, &maxcontext.status);
inline
        
_mpd_qmul_exact too costly to inline (cost=265, threshold=250) 
mpd_qsqrt
inline
        
_mpd_qmul_exact will not be inlined into mpd_qsqrt 
mpd_qsqrt
7791
        if (mpd_isspecial(&r)) {
inline
            
mpd_isspecial should always be inlined (cost=always) 
mpd_qsqrt
inline
            
mpd_isspecial inlined into mpd_qsqrt 
mpd_qsqrt
7792
            mpd_seterror(result, maxcontext.status&MPD_Errors, status);
inline
            
mpd_seterror can be inlined into mpd_qsqrt with cost=130 (threshold=250) 
mpd_qsqrt
inline
            
mpd_seterror inlined into mpd_qsqrt 
mpd_qsqrt
gvn
                                            
load of type i32 not eliminated because it is clobbered by call 
mpd_qsqrt
7793
            goto out;
7794
        }
7795
        exact = (_mpd_cmp(&r, &c) == 0);
inline
                 
_mpd_cmp too costly to inline (cost=525, threshold=250) 
mpd_qsqrt
inline
                 
_mpd_cmp will not be inlined into mpd_qsqrt 
mpd_qsqrt
7796
    }
7797
7798
    if (exact) {
7799
        if (shift >= 0) {
7800
            mpd_qshiftr_inplace(result, shift);
inline
            
mpd_qshiftr_inplace too costly to inline (cost=475, threshold=250) 
mpd_qsqrt
inline
            
mpd_qshiftr_inplace will not be inlined into mpd_qsqrt 
mpd_qsqrt
7801
        }
7802
        else {
7803
            if (!mpd_qshiftl(result, result, -shift, status)) {
inline
                 
mpd_qshiftl too costly to inline (cost=320, threshold=250) 
mpd_qsqrt
inline
                 
mpd_qshiftl will not be inlined into mpd_qsqrt 
mpd_qsqrt
7804
                goto malloc_error;
7805
            }
7806
        }
7807
        ideal_exp += shift;
7808
    }
7809
    else {
7810
        int lsd = (int)mpd_lsd(result->data[0]);
inline
                       
mpd_lsd should always be inlined (cost=always) 
mpd_qsqrt
inline
                       
mpd_lsd inlined into mpd_qsqrt 
mpd_qsqrt
gvn
                                       
load of type i64* not eliminated because it is clobbered by call 
mpd_qsqrt
7811
        if (lsd == 0 || lsd == 5) {
7812
            result->data[0] += 1;
gvn
                    
load of type i64* eliminated in favor of load 
mpd_qsqrt
gvn
                            
load of type i64 eliminated in favor of load 
mpd_qsqrt
7813
        }
7814
    }
7815
7816
    result->exp = ideal_exp;
7817
7818
7819
out:
7820
    mpd_del(&c);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qsqrt
inline
    
mpd_del inlined into mpd_qsqrt 
mpd_qsqrt
7821
    mpd_del(&q);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qsqrt
inline
    
mpd_del inlined into mpd_qsqrt 
mpd_qsqrt
7822
    mpd_del(&r);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qsqrt
inline
    
mpd_del inlined into mpd_qsqrt 
mpd_qsqrt
7823
    maxcontext = *ctx;
7824
    maxcontext.round = MPD_ROUND_HALF_EVEN;
7825
    mpd_qfinalize(result, &maxcontext, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qsqrt
inline
    
mpd_qfinalize will not be inlined into mpd_qsqrt 
mpd_qsqrt
7826
    return;
7827
7828
malloc_error:
7829
    mpd_seterror(result, MPD_Malloc_error, status);
inline
    
mpd_seterror can be inlined into mpd_qsqrt with cost=130 (threshold=250) 
mpd_qsqrt
inline
    
mpd_seterror inlined into mpd_qsqrt 
mpd_qsqrt
7830
    goto out;
7831
}
7832
7833
7834
/******************************************************************************/
7835
/*                              Base conversions                              */
7836
/******************************************************************************/
7837
7838
/* Space needed to represent an integer mpd_t in base 'base'. */
7839
size_t
7840
mpd_sizeinbase(const mpd_t *a, uint32_t base)
7841
{
7842
    double x;
7843
    size_t digits;
7844
7845
    assert(mpd_isinteger(a));
7846
    assert(base >= 2);
7847
7848
    if (mpd_iszero(a)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
mpd_sizeinbase
inline
        
mpd_iszero inlined into mpd_sizeinbase 
mpd_sizeinbase
7849
        return 1;
7850
    }
7851
7852
    digits = a->digits+a->exp;
7853
    assert(digits > 0);
7854
7855
#ifdef CONFIG_64
7856
    /* ceil(2711437152599294 / log10(2)) + 4 == 2**53 */
7857
    if (digits > 2711437152599294ULL) {
7858
        return SIZE_MAX;
7859
    }
7860
#endif
7861
7862
    x = (double)digits / log10(base);
inline
                         
log10 will not be inlined into mpd_sizeinbase because its definition is unavailable 
mpd_sizeinbase
7863
    return (x > SIZE_MAX-1) ? SIZE_MAX : (size_t)x + 1;
7864
}
7865
7866
/* Space needed to import a base 'base' integer of length 'srclen'. */
7867
static mpd_ssize_t
7868
_mpd_importsize(size_t srclen, uint32_t base)
7869
{
7870
    double x;
7871
7872
    assert(srclen > 0);
7873
    assert(base >= 2);
7874
7875
#if SIZE_MAX == UINT64_MAX
7876
    if (srclen > (1ULL<<53)) {
7877
        return MPD_SSIZE_MAX;
7878
    }
7879
#endif
7880
7881
    x = (double)srclen * (log10(base)/MPD_RDIGITS);
inline
                          
log10 will not be inlined into _mpd_importsize because its definition is unavailable 
_mpd_importsize
7882
    return (x >= MPD_MAXIMPORT) ? MPD_SSIZE_MAX : (mpd_ssize_t)x + 1;
7883
}
7884
7885
static uint8_t
7886
mpd_resize_u16(uint16_t **w, size_t nmemb)
7887
{
7888
    uint8_t err = 0;
7889
    *w = mpd_realloc(*w, nmemb, sizeof **w, &err);
inline
         
mpd_realloc will not be inlined into mpd_resize_u16 because its definition is unavailable 
mpd_resize_u16
licm
                     
hosting bitcast 
_baseconv_to_u16
licm
                     
failed to move load with loop-invariant address because the loop may invalidate its value 
_baseconv_to_u16
licm
                     
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexport_u16
gvn
                     
load of type i8* not eliminated because it is clobbered by call 
mpd_qexport_u16
7890
    return !err;
gvn
            
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_resize_u16
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
_baseconv_to_u16
gvn
            
load of type i8 not eliminated in favor of store because it is clobbered by call 
_baseconv_to_u16
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexport_u16
gvn
            
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qexport_u16
7891
}
7892
7893
static uint8_t
7894
mpd_resize_u32(uint32_t **w, size_t nmemb)
7895
{
7896
    uint8_t err = 0;
7897
    *w = mpd_realloc(*w, nmemb, sizeof **w, &err);
inline
         
mpd_realloc will not be inlined into mpd_resize_u32 because its definition is unavailable 
mpd_resize_u32
licm
                     
hosting bitcast 
_baseconv_to_smaller
licm
                     
failed to move load with loop-invariant address because the loop may invalidate its value 
_baseconv_to_smaller
licm
                     
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexport_u32
gvn
                     
load of type i8* not eliminated because it is clobbered by call 
mpd_qexport_u32
7898
    return !err;
gvn
            
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_resize_u32
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
_baseconv_to_smaller
gvn
            
load of type i8 not eliminated in favor of store because it is clobbered by call 
_baseconv_to_smaller
licm
            
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexport_u32
gvn
            
load of type i8 not eliminated in favor of store because it is clobbered by call 
mpd_qexport_u32
7899
}
7900
7901
static size_t
7902
_baseconv_to_u16(uint16_t **w, size_t wlen, mpd_uint_t wbase,
7903
                 mpd_uint_t *u, mpd_ssize_t ulen)
7904
{
7905
    size_t n = 0;
7906
7907
    assert(wlen > 0 && ulen > 0);
7908
    assert(wbase <= (1U<<16));
7909
7910
    do {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qexport_u16
loop-vectorize
    
loop not vectorized 
mpd_qexport_u16
7911
        if (n >= wlen) {
7912
            if (!mpd_resize_u16(w, n+1)) {
inline
                 
mpd_resize_u16 can be inlined into _baseconv_to_u16 with cost=-14960 (threshold=375) 
_baseconv_to_u16
inline
                 
mpd_resize_u16 inlined into _baseconv_to_u16 
_baseconv_to_u16
7913
                return SIZE_MAX;
7914
            }
7915
            wlen = n+1;
7916
        }
7917
        (*w)[n++] = (uint16_t)_mpd_shortdiv(u, u, ulen, wbase);
inline
                              
_mpd_shortdiv will not be inlined into _baseconv_to_u16 because its definition is unavailable 
_baseconv_to_u16
licm
         
failed to move load with loop-invariant address because the loop may invalidate its value 
_baseconv_to_u16
gvn
         
load of type i16* not eliminated because it is clobbered by call 
_baseconv_to_u16
licm
         
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexport_u16
gvn
         
load of type i16* not eliminated in favor of load because it is clobbered by call 
mpd_qexport_u16
7918
        /* ulen is at least 1. u[ulen-1] can only be zero if ulen == 1. */
7919
        ulen = _mpd_real_size(u, ulen);
inline
               
_mpd_real_size can be inlined into _baseconv_to_u16 with cost=-5 (threshold=325) 
_baseconv_to_u16
inline
               
_mpd_real_size inlined into _baseconv_to_u16 
_baseconv_to_u16
7920
7921
    } while (u[ulen-1] != 0);
gvn
             
load eliminated by PRE 
_baseconv_to_u16
7922
7923
    return n;
7924
}
7925
7926
static size_t
7927
_coeff_from_u16(mpd_t *w, mpd_ssize_t wlen,
7928
                const mpd_uint_t *u, size_t ulen, uint32_t ubase,
7929
                uint32_t *status)
7930
{
7931
    mpd_ssize_t n = 0;
7932
    mpd_uint_t carry;
7933
7934
    assert(wlen > 0 && ulen > 0);
7935
    assert(ubase <= (1U<<16));
7936
7937
    w->data[n++] = u[--ulen];
gvn
                   
load of type i64 not eliminated because it is clobbered by call 
mpd_qimport_u16
gvn
       
load of type i64* not eliminated because it is clobbered by call 
mpd_qimport_u16
7938
    while (--ulen != SIZE_MAX) {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qimport_u16
loop-vectorize
    
loop not vectorized 
mpd_qimport_u16
7939
        carry = _mpd_shortmul_c(w->data, w->data, n, ubase);
inline
                
_mpd_shortmul_c will not be inlined into _coeff_from_u16 because its definition is unavailable 
_coeff_from_u16
licm
                                   
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_u16
licm
                                                     
hosting zext 
_coeff_from_u16
gvn
                                   
load of type i64* not eliminated in favor of load because it is clobbered by call 
_coeff_from_u16
licm
                                   
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u16
gvn
                                   
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qimport_u16
7940
        if (carry) {
7941
            if (n >= wlen) {
7942
                if (!mpd_qresize(w, n+1, status)) {
inline
                     
mpd_qresize should always be inlined (cost=always) 
_coeff_from_u16
inline
                     
mpd_qresize inlined into _coeff_from_u16 
_coeff_from_u16
7943
                    return SIZE_MAX;
7944
                }
7945
                wlen = n+1;
7946
            }
7947
            w->data[n++] = carry;
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_u16
gvn
               
load of type i64* not eliminated because it is clobbered by call 
_coeff_from_u16
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u16
gvn
               
load of type i64* not eliminated because it is clobbered by call 
mpd_qimport_u16
7948
        }
7949
        carry = _mpd_shortadd(w->data, n, u[ulen]);
inline
                
_mpd_shortadd will not be inlined into _coeff_from_u16 because its definition is unavailable 
_coeff_from_u16
licm
                                 
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_u16
gvn
                                 
load of type i64* not eliminated because it is clobbered by call 
_coeff_from_u16
gvn
                                 
load eliminated by PRE 
_coeff_from_u16
gvn
                                          
load of type i64 not eliminated because it is clobbered by call 
_coeff_from_u16
gvn
                                          
load of type i64 not eliminated because it is clobbered by store 
mpd_qimport_u16
7950
        if (carry) {
7951
            if (n >= wlen) {
7952
                if (!mpd_qresize(w, n+1, status)) {
inline
                     
mpd_qresize should always be inlined (cost=always) 
_coeff_from_u16
inline
                     
mpd_qresize inlined into _coeff_from_u16 
_coeff_from_u16
7953
                    return SIZE_MAX;
7954
                }
7955
                wlen = n+1;
7956
            }
7957
            w->data[n++] = carry;
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_u16
gvn
               
load of type i64* not eliminated because it is clobbered by call 
_coeff_from_u16
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u16
gvn
               
load of type i64* not eliminated because it is clobbered by call 
mpd_qimport_u16
7958
        }
7959
    }
7960
7961
    return n;
7962
}
7963
7964
/* target base wbase < source base ubase */
7965
static size_t
7966
_baseconv_to_smaller(uint32_t **w, size_t wlen, uint32_t wbase,
7967
                     mpd_uint_t *u, mpd_ssize_t ulen, mpd_uint_t ubase)
7968
{
7969
    size_t n = 0;
7970
7971
    assert(wlen > 0 && ulen > 0);
7972
    assert(wbase < ubase);
7973
7974
    do {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qexport_u32
loop-vectorize
    
loop not vectorized 
mpd_qexport_u32
7975
        if (n >= wlen) {
7976
            if (!mpd_resize_u32(w, n+1)) {
inline
                 
mpd_resize_u32 can be inlined into _baseconv_to_smaller with cost=-14960 (threshold=375) 
_baseconv_to_smaller
inline
                 
mpd_resize_u32 inlined into _baseconv_to_smaller 
_baseconv_to_smaller
7977
                return SIZE_MAX;
7978
            }
7979
            wlen = n+1;
7980
        }
7981
        (*w)[n++] = (uint32_t)_mpd_shortdiv_b(u, u, ulen, wbase, ubase);
inline
                              
_mpd_shortdiv_b will not be inlined into _baseconv_to_smaller because its definition is unavailable 
_baseconv_to_smaller
licm
                                                          
hosting zext 
_baseconv_to_smaller
licm
         
failed to move load with loop-invariant address because the loop may invalidate its value 
_baseconv_to_smaller
gvn
         
load of type i32* not eliminated because it is clobbered by call 
_baseconv_to_smaller
licm
         
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qexport_u32
gvn
         
load of type i32* not eliminated in favor of load because it is clobbered by call 
mpd_qexport_u32
7982
        /* ulen is at least 1. u[ulen-1] can only be zero if ulen == 1. */
7983
        ulen = _mpd_real_size(u, ulen);
inline
               
_mpd_real_size can be inlined into _baseconv_to_smaller with cost=-15005 (threshold=325) 
_baseconv_to_smaller
inline
               
_mpd_real_size inlined into _baseconv_to_smaller 
_baseconv_to_smaller
7984
7985
    } while (u[ulen-1] != 0);
gvn
             
load eliminated by PRE 
_baseconv_to_smaller
7986
7987
    return n;
7988
}
7989
7990
#ifdef CONFIG_32
7991
/* target base 'wbase' == source base 'ubase' */
7992
static size_t
7993
_copy_equal_base(uint32_t **w, size_t wlen,
7994
                 const uint32_t *u, size_t ulen)
7995
{
7996
    if (wlen < ulen) {
7997
        if (!mpd_resize_u32(w, ulen)) {
7998
            return SIZE_MAX;
7999
        }
8000
    }
8001
8002
    memcpy(*w, u, ulen * (sizeof **w));
8003
    return ulen;
8004
}
8005
8006
/* target base 'wbase' > source base 'ubase' */
8007
static size_t
8008
_baseconv_to_larger(uint32_t **w, size_t wlen, mpd_uint_t wbase,
8009
                    const mpd_uint_t *u, size_t ulen, mpd_uint_t ubase)
8010
{
8011
    size_t n = 0;
8012
    mpd_uint_t carry;
8013
8014
    assert(wlen > 0 && ulen > 0);
8015
    assert(ubase < wbase);
8016
8017
    (*w)[n++] = u[--ulen];
8018
    while (--ulen != SIZE_MAX) {
8019
        carry = _mpd_shortmul_b(*w, *w, n, ubase, wbase);
8020
        if (carry) {
8021
            if (n >= wlen) {
8022
                if (!mpd_resize_u32(w, n+1)) {
8023
                    return SIZE_MAX;
8024
                }
8025
                wlen = n+1;
8026
            }
8027
            (*w)[n++] = carry;
8028
        }
8029
        carry = _mpd_shortadd_b(*w, n, u[ulen], wbase);
8030
        if (carry) {
8031
            if (n >= wlen) {
8032
                if (!mpd_resize_u32(w, n+1)) {
8033
                    return SIZE_MAX;
8034
                }
8035
                wlen = n+1;
8036
            }
8037
            (*w)[n++] = carry;
8038
        }
8039
    }
8040
8041
    return n;
8042
}
8043
8044
/* target base wbase < source base ubase */
8045
static size_t
8046
_coeff_from_larger_base(mpd_t *w, size_t wlen, mpd_uint_t wbase,
8047
                        mpd_uint_t *u, mpd_ssize_t ulen, mpd_uint_t ubase,
8048
                        uint32_t *status)
8049
{
8050
    size_t n = 0;
8051
8052
    assert(wlen > 0 && ulen > 0);
8053
    assert(wbase < ubase);
8054
8055
    do {
8056
        if (n >= wlen) {
8057
            if (!mpd_qresize(w, n+1, status)) {
8058
                return SIZE_MAX;
8059
            }
8060
            wlen = n+1;
8061
        }
8062
        w->data[n++] = (uint32_t)_mpd_shortdiv_b(u, u, ulen, wbase, ubase);
8063
        /* ulen is at least 1. u[ulen-1] can only be zero if ulen == 1. */
8064
        ulen = _mpd_real_size(u, ulen);
8065
8066
    } while (u[ulen-1] != 0);
8067
8068
    return n;
8069
}
8070
#endif
8071
8072
/* target base 'wbase' > source base 'ubase' */
8073
static size_t
8074
_coeff_from_smaller_base(mpd_t *w, mpd_ssize_t wlen, mpd_uint_t wbase,
8075
                         const uint32_t *u, size_t ulen, mpd_uint_t ubase,
8076
                         uint32_t *status)
8077
{
8078
    mpd_ssize_t n = 0;
8079
    mpd_uint_t carry;
8080
8081
    assert(wlen > 0 && ulen > 0);
8082
    assert(wbase > ubase);
8083
8084
    w->data[n++] = u[--ulen];
gvn
                   
load of type i32 not eliminated because it is clobbered by call 
mpd_qimport_u32
gvn
       
load of type i64* not eliminated because it is clobbered by call 
mpd_qimport_u32
8085
    while (--ulen != SIZE_MAX) {
loop-vectorize
    
loop not vectorized: loop control flow is not understood by vectorizer 
mpd_qimport_u32
loop-vectorize
    
loop not vectorized 
mpd_qimport_u32
8086
        carry = _mpd_shortmul_b(w->data, w->data, n, ubase, wbase);
inline
                
_mpd_shortmul_b will not be inlined into _coeff_from_smaller_base because its definition is unavailable 
_coeff_from_smaller_base
licm
                                   
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_smaller_base
gvn
                                   
load of type i64* not eliminated in favor of load because it is clobbered by call 
_coeff_from_smaller_base
licm
                                   
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u32
gvn
                                   
load of type i64* not eliminated in favor of load because it is clobbered by call 
mpd_qimport_u32
8087
        if (carry) {
8088
            if (n >= wlen) {
8089
                if (!mpd_qresize(w, n+1, status)) {
inline
                     
mpd_qresize should always be inlined (cost=always) 
_coeff_from_smaller_base
inline
                     
mpd_qresize inlined into _coeff_from_smaller_base 
_coeff_from_smaller_base
8090
                    return SIZE_MAX;
8091
                }
8092
                wlen = n+1;
8093
            }
8094
            w->data[n++] = carry;
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_smaller_base
gvn
               
load of type i64* not eliminated because it is clobbered by call 
_coeff_from_smaller_base
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u32
gvn
               
load of type i64* not eliminated because it is clobbered by call 
mpd_qimport_u32
8095
        }
8096
        carry = _mpd_shortadd_b(w->data, n, u[ulen], wbase);
inline
                
_mpd_shortadd_b will not be inlined into _coeff_from_smaller_base because its definition is unavailable 
_coeff_from_smaller_base
licm
                                   
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_smaller_base
gvn
                                   
load of type i64* not eliminated because it is clobbered by call 
_coeff_from_smaller_base
gvn
                                   
load eliminated by PRE 
_coeff_from_smaller_base
gvn
                                            
load of type i32 not eliminated because it is clobbered by call 
_coeff_from_smaller_base
gvn
                                            
load of type i32 not eliminated because it is clobbered by call 
mpd_qimport_u32
8097
        if (carry) {
8098
            if (n >= wlen) {
8099
                if (!mpd_qresize(w, n+1, status)) {
inline
                     
mpd_qresize should always be inlined (cost=always) 
_coeff_from_smaller_base
inline
                     
mpd_qresize inlined into _coeff_from_smaller_base 
_coeff_from_smaller_base
8100
                    return SIZE_MAX;
8101
                }
8102
                wlen = n+1;
8103
            }
8104
            w->data[n++] = carry;
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
_coeff_from_smaller_base
gvn
               
load of type i64* not eliminated because it is clobbered by call 
_coeff_from_smaller_base
licm
               
failed to move load with loop-invariant address because the loop may invalidate its value 
mpd_qimport_u32
gvn
               
load of type i64* not eliminated because it is clobbered by call 
mpd_qimport_u32
8105
        }
8106
    }
8107
8108
    return n;
8109
}
8110
8111
/*
8112
 * Convert an integer mpd_t to a multiprecision integer with base <= 2**16.
8113
 * The least significant word of the result is (*rdata)[0].
8114
 *
8115
 * If rdata is NULL, space is allocated by the function and rlen is irrelevant.
8116
 * In case of an error any allocated storage is freed and rdata is set back to
8117
 * NULL.
8118
 *
8119
 * If rdata is non-NULL, it MUST be allocated by one of libmpdec's allocation
8120
 * functions and rlen MUST be correct. If necessary, the function will resize
8121
 * rdata. In case of an error the caller must free rdata.
8122
 *
8123
 * Return value: In case of success, the exact length of rdata, SIZE_MAX
8124
 * otherwise.
8125
 */
8126
size_t
8127
mpd_qexport_u16(uint16_t **rdata, size_t rlen, uint32_t rbase,
8128
                const mpd_t *src, uint32_t *status)
8129
{
8130
    MPD_NEW_STATIC(tsrc,0,0,0,0);
8131
    int alloc = 0; /* rdata == NULL */
8132
    size_t n;
8133
8134
    assert(rbase <= (1U<<16));
8135
8136
    if (mpd_isspecial(src) || !_mpd_isint(src)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qexport_u16
inline
        
mpd_isspecial inlined into mpd_qexport_u16 
mpd_qexport_u16
inline
                               
_mpd_isint can be inlined into mpd_qexport_u16 with cost=110 (threshold=250) 
mpd_qexport_u16
inline
                               
_mpd_isint inlined into mpd_qexport_u16 
mpd_qexport_u16
8137
        *status |= MPD_Invalid_operation;
8138
        return SIZE_MAX;
8139
    }
8140
8141
    if (*rdata == NULL) {
8142
        rlen = mpd_sizeinbase(src, rbase);
inline
               
mpd_sizeinbase can be inlined into mpd_qexport_u16 with cost=110 (threshold=250) 
mpd_qexport_u16
inline
               
mpd_sizeinbase inlined into mpd_qexport_u16 
mpd_qexport_u16
8143
        if (rlen == SIZE_MAX) {
8144
            *status |= MPD_Invalid_operation;
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
mpd_qexport_u16
8145
            return SIZE_MAX;
8146
        }
8147
        *rdata = mpd_alloc(rlen, sizeof **rdata);
inline
                 
mpd_alloc will not be inlined into mpd_qexport_u16 because its definition is unavailable 
mpd_qexport_u16
8148
        if (*rdata == NULL) {
8149
            goto malloc_error;
8150
        }
8151
        alloc = 1;
8152
    }
8153
8154
    if (mpd_iszero(src)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
mpd_qexport_u16
inline
        
mpd_iszero inlined into mpd_qexport_u16 
mpd_qexport_u16
8155
        **rdata = 0;
gvn
         
load of type i16* eliminated in favor of phi 
mpd_qexport_u16
8156
        return 1;
8157
    }
8158
8159
    if (src->exp >= 0) {
gvn
             
load of type i64 not eliminated because it is clobbered by call 
mpd_qexport_u16
8160
        if (!mpd_qshiftl(&tsrc, src, src->exp, status)) {
inline
             
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
mpd_qexport_u16
inline
             
mpd_qshiftl will not be inlined into mpd_qexport_u16 
mpd_qexport_u16
8161
            goto malloc_error;
8162
        }
8163
    }
8164
    else {
8165
        if (mpd_qshiftr(&tsrc, src, -src->exp, status) == MPD_UINT_MAX) {
inline
            
mpd_qshiftr too costly to inline (cost=630, threshold=625) 
mpd_qexport_u16
inline
            
mpd_qshiftr will not be inlined into mpd_qexport_u16 
mpd_qexport_u16
8166
            goto malloc_error;
8167
        }
8168
    }
8169
8170
    n = _baseconv_to_u16(rdata, rlen, rbase, tsrc.data, tsrc.len);
inline
        
_baseconv_to_u16 can be inlined into mpd_qexport_u16 with cost=-14830 (threshold=250) 
mpd_qexport_u16
inline
        
_baseconv_to_u16 inlined into mpd_qexport_u16 
mpd_qexport_u16
gvn
                                                  
load of type i64* not eliminated in favor of store because it is clobbered by call 
mpd_qexport_u16
gvn
                                                             
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qexport_u16
8171
    if (n == SIZE_MAX) {
8172
        goto malloc_error;
8173
    }
8174
8175
8176
out:
8177
    mpd_del(&tsrc);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qexport_u16
inline
    
mpd_del inlined into mpd_qexport_u16 
mpd_qexport_u16
8178
    return n;
8179
8180
malloc_error:
8181
    if (alloc) {
8182
        mpd_free(*rdata);
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qexport_u16
gvn
                 
load of type i8* not eliminated because it is clobbered by call 
mpd_qexport_u16
8183
        *rdata = NULL;
8184
    }
8185
    n = SIZE_MAX;
8186
    *status |= MPD_Malloc_error;
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qexport_u16
8187
    goto out;
8188
}
8189
8190
/*
8191
 * Convert an integer mpd_t to a multiprecision integer with base<=UINT32_MAX.
8192
 * The least significant word of the result is (*rdata)[0].
8193
 *
8194
 * If rdata is NULL, space is allocated by the function and rlen is irrelevant.
8195
 * In case of an error any allocated storage is freed and rdata is set back to
8196
 * NULL.
8197
 *
8198
 * If rdata is non-NULL, it MUST be allocated by one of libmpdec's allocation
8199
 * functions and rlen MUST be correct. If necessary, the function will resize
8200
 * rdata. In case of an error the caller must free rdata.
8201
 *
8202
 * Return value: In case of success, the exact length of rdata, SIZE_MAX
8203
 * otherwise.
8204
 */
8205
size_t
8206
mpd_qexport_u32(uint32_t **rdata, size_t rlen, uint32_t rbase,
8207
                const mpd_t *src, uint32_t *status)
8208
{
8209
    MPD_NEW_STATIC(tsrc,0,0,0,0);
8210
    int alloc = 0; /* rdata == NULL */
8211
    size_t n;
8212
8213
    if (mpd_isspecial(src) || !_mpd_isint(src)) {
inline
        
mpd_isspecial should always be inlined (cost=always) 
mpd_qexport_u32
inline
        
mpd_isspecial inlined into mpd_qexport_u32 
mpd_qexport_u32
inline
                               
_mpd_isint can be inlined into mpd_qexport_u32 with cost=-14890 (threshold=250) 
mpd_qexport_u32
inline
                               
_mpd_isint inlined into mpd_qexport_u32 
mpd_qexport_u32
8214
        *status |= MPD_Invalid_operation;
8215
        return SIZE_MAX;
8216
    }
8217
8218
    if (*rdata == NULL) {
8219
        rlen = mpd_sizeinbase(src, rbase);
inline
               
mpd_sizeinbase can be inlined into mpd_qexport_u32 with cost=110 (threshold=250) 
mpd_qexport_u32
inline
               
mpd_sizeinbase inlined into mpd_qexport_u32 
mpd_qexport_u32
8220
        if (rlen == SIZE_MAX) {
8221
            *status |= MPD_Invalid_operation;
gvn
                    
load of type i32 not eliminated because it is clobbered by call 
mpd_qexport_u32
8222
            return SIZE_MAX;
8223
        }
8224
        *rdata = mpd_alloc(rlen, sizeof **rdata);
inline
                 
mpd_alloc will not be inlined into mpd_qexport_u32 because its definition is unavailable 
mpd_qexport_u32
8225
        if (*rdata == NULL) {
8226
            goto malloc_error;
8227
        }
8228
        alloc = 1;
8229
    }
8230
8231
    if (mpd_iszero(src)) {
inline
        
mpd_iszero should always be inlined (cost=always) 
mpd_qexport_u32
inline
        
mpd_iszero inlined into mpd_qexport_u32 
mpd_qexport_u32
8232
        **rdata = 0;
gvn
         
load of type i32* eliminated in favor of phi 
mpd_qexport_u32
8233
        return 1;
8234
    }
8235
8236
    if (src->exp >= 0) {
gvn
             
load of type i64 not eliminated because it is clobbered by call 
mpd_qexport_u32
8237
        if (!mpd_qshiftl(&tsrc, src, src->exp, status)) {
inline
             
mpd_qshiftl too costly to inline (cost=575, threshold=250) 
mpd_qexport_u32
inline
             
mpd_qshiftl will not be inlined into mpd_qexport_u32 
mpd_qexport_u32
8238
            goto malloc_error;
8239
        }
8240
    }
8241
    else {
8242
        if (mpd_qshiftr(&tsrc, src, -src->exp, status) == MPD_UINT_MAX) {
inline
            
mpd_qshiftr too costly to inline (cost=630, threshold=625) 
mpd_qexport_u32
inline
            
mpd_qshiftr will not be inlined into mpd_qexport_u32 
mpd_qexport_u32
8243
            goto malloc_error;
8244
        }
8245
    }
8246
8247
#ifdef CONFIG_64
8248
    n = _baseconv_to_smaller(rdata, rlen, rbase,
inline
        
_baseconv_to_smaller can be inlined into mpd_qexport_u32 with cost=-14825 (threshold=250) 
mpd_qexport_u32
inline
        
_baseconv_to_smaller inlined into mpd_qexport_u32 
mpd_qexport_u32
8249
                             tsrc.data, tsrc.len, MPD_RADIX);
gvn
                                  
load of type i64* not eliminated in favor of store because it is clobbered by call 
mpd_qexport_u32
gvn
                                             
load of type i64 not eliminated in favor of store because it is clobbered by call 
mpd_qexport_u32
8250
#else
8251
    if (rbase == MPD_RADIX) {
8252
        n = _copy_equal_base(rdata, rlen, tsrc.data, tsrc.len);
8253
    }
8254
    else if (rbase < MPD_RADIX) {
8255
        n = _baseconv_to_smaller(rdata, rlen, rbase,
8256
                                 tsrc.data, tsrc.len, MPD_RADIX);
8257
    }
8258
    else {
8259
        n = _baseconv_to_larger(rdata, rlen, rbase,
8260
                                tsrc.data, tsrc.len, MPD_RADIX);
8261
    }
8262
#endif
8263
8264
    if (n == SIZE_MAX) {
8265
        goto malloc_error;
8266
    }
8267
8268
8269
out:
8270
    mpd_del(&tsrc);
inline
    
mpd_del should always be inlined (cost=always) 
mpd_qexport_u32
inline
    
mpd_del inlined into mpd_qexport_u32 
mpd_qexport_u32
8271
    return n;
8272
8273
malloc_error:
8274
    if (alloc) {
8275
        mpd_free(*rdata);
gvn
        
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qexport_u32
gvn
                 
load of type i8* not eliminated because it is clobbered by call 
mpd_qexport_u32
8276
        *rdata = NULL;
8277
    }
8278
    n = SIZE_MAX;
8279
    *status |= MPD_Malloc_error;
gvn
            
load of type i32 not eliminated because it is clobbered by call 
mpd_qexport_u32
8280
    goto out;
8281
}
8282
8283
8284
/*
8285
 * Converts a multiprecision integer with base <= UINT16_MAX+1 to an mpd_t.
8286
 * The least significant word of the source is srcdata[0].
8287
 */
8288
void
8289
mpd_qimport_u16(mpd_t *result,
8290
                const uint16_t *srcdata, size_t srclen,
8291
                uint8_t srcsign, uint32_t srcbase,
8292
                const mpd_context_t *ctx, uint32_t *status)
8293
{
8294
    mpd_uint_t *usrc; /* uint16_t src copied to an mpd_uint_t array */
8295
    mpd_ssize_t rlen; /* length of the result */
8296
    size_t n;
8297
8298
    assert(srclen > 0);
8299
    assert(srcbase <= (1U<<16));
8300
8301
    rlen = _mpd_importsize(srclen, srcbase);
inline
           
_mpd_importsize can be inlined into mpd_qimport_u16 with cost=45 (threshold=250) 
mpd_qimport_u16
inline
           
_mpd_importsize inlined into mpd_qimport_u16 
mpd_qimport_u16
8302
    if (rlen == MPD_SSIZE_MAX) {
8303
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qimport_u16 with cost=130 (threshold=250) 
mpd_qimport_u16
inline
        
mpd_seterror inlined into mpd_qimport_u16 
mpd_qimport_u16
8304
        return;
8305
    }
8306
8307
    usrc = mpd_alloc((mpd_size_t)srclen, sizeof *usrc);
inline
           
mpd_alloc will not be inlined into mpd_qimport_u16 because its definition is unavailable 
mpd_qimport_u16
8308
    if (usrc == NULL) {
8309
        mpd_seterror(result, MPD_Malloc_error, status);
inline
        
mpd_seterror can be inlined into mpd_qimport_u16 with cost=130 (threshold=250) 
mpd_qimport_u16
inline
        
mpd_seterror inlined into mpd_qimport_u16 
mpd_qimport_u16
8310
        return;
8311
    }
8312
    for (n = 0; n < srclen; n++) {
loop-vectorize
    
vectorized loop (vectorization width: 2, interleaved count: 2) 
mpd_qimport_u16
loop-unroll
    
unrolled loop by a factor of 2 with run-time trip count 
mpd_qimport_u16
8313
        usrc[n] = srcdata[n];
gvn
                  
load of type i16 not eliminated because it is clobbered by call 
mpd_qimport_u16
8314
    }
8315
8316
    if (!mpd_qresize(result, rlen, status)) {
inline
         
mpd_qresize should always be inlined (cost=always) 
mpd_qimport_u16
inline
         
mpd_qresize inlined into mpd_qimport_u16 
mpd_qimport_u16
8317
        goto finish;
8318
    }
8319
8320
    n = _coeff_from_u16(result, rlen, usrc, srclen, srcbase, status);
inline
        
_coeff_from_u16 can be inlined into mpd_qimport_u16 with cost=-14510 (threshold=250) 
mpd_qimport_u16
inline
        
_coeff_from_u16 inlined into mpd_qimport_u16 
mpd_qimport_u16
8321
    if (n == SIZE_MAX) {
8322
        goto finish;
8323
    }
8324
8325
    mpd_set_flags(result, srcsign);
inline
    
mpd_set_flags should always be inlined (cost=always) 
mpd_qimport_u16
inline
    
mpd_set_flags inlined into mpd_qimport_u16 
mpd_qimport_u16
8326
    result->exp = 0;
8327
    result->len = n;
8328
    mpd_setdigits(result);
inline
    
mpd_setdigits can be inlined into mpd_qimport_u16 with cost=295 (threshold=325) 
mpd_qimport_u16
inline
    
mpd_setdigits inlined into mpd_qimport_u16 
mpd_qimport_u16
8329
8330
    mpd_qresize(result, result->len, status);
inline
    
mpd_qresize should always be inlined (cost=always) 
mpd_qimport_u16
inline
    
mpd_qresize inlined into mpd_qimport_u16 
mpd_qimport_u16
gvn
                                
load of type i64 eliminated in favor of phi 
mpd_qimport_u16
8331
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qimport_u16
inline
    
mpd_qfinalize will not be inlined into mpd_qimport_u16 
mpd_qimport_u16
8332
8333
8334
finish:
8335
    mpd_free(usrc);
gvn
    
load of type void (i8*)* not eliminated because it is clobbered by call 
mpd_qimport_u16
8336
}
8337
8338
/*
8339
 * Converts a multiprecision integer with base <= UINT32_MAX to an mpd_t.
8340
 * The least significant word of the source is srcdata[0].
8341
 */
8342
void
8343
mpd_qimport_u32(mpd_t *result,
8344
                const uint32_t *srcdata, size_t srclen,
8345
                uint8_t srcsign, uint32_t srcbase,
8346
                const mpd_context_t *ctx, uint32_t *status)
8347
{
8348
    mpd_ssize_t rlen; /* length of the result */
8349
    size_t n;
8350
8351
    assert(srclen > 0);
8352
8353
    rlen = _mpd_importsize(srclen, srcbase);
inline
           
_mpd_importsize can be inlined into mpd_qimport_u32 with cost=-14955 (threshold=250) 
mpd_qimport_u32
inline
           
_mpd_importsize inlined into mpd_qimport_u32 
mpd_qimport_u32
8354
    if (rlen == MPD_SSIZE_MAX) {
8355
        mpd_seterror(result, MPD_Invalid_operation, status);
inline
        
mpd_seterror can be inlined into mpd_qimport_u32 with cost=130 (threshold=250) 
mpd_qimport_u32
inline
        
mpd_seterror inlined into mpd_qimport_u32 
mpd_qimport_u32
8356
        return;
8357
    }
8358
8359
    if (!mpd_qresize(result, rlen, status)) {
inline
         
mpd_qresize should always be inlined (cost=always) 
mpd_qimport_u32
inline
         
mpd_qresize inlined into mpd_qimport_u32 
mpd_qimport_u32
8360
        return;
8361
    }
8362
8363
#ifdef CONFIG_64
8364
    n = _coeff_from_smaller_base(result, rlen, MPD_RADIX,
inline
        
_coeff_from_smaller_base can be inlined into mpd_qimport_u32 with cost=-14500 (threshold=250) 
mpd_qimport_u32
inline
        
_coeff_from_smaller_base inlined into mpd_qimport_u32 
mpd_qimport_u32
8365
                                 srcdata, srclen, srcbase,
8366
                                 status);
8367
#else
8368
    if (srcbase == MPD_RADIX) {
8369
        if (!mpd_qresize(result, srclen, status)) {
8370
            return;
8371
        }
8372
        memcpy(result->data, srcdata, srclen * (sizeof *srcdata));
8373
        n = srclen;
8374
    }
8375
    else if (srcbase < MPD_RADIX) {
8376
        n = _coeff_from_smaller_base(result, rlen, MPD_RADIX,
8377
                                     srcdata, srclen, srcbase,
8378
                                     status);
8379
    }
8380
    else {
8381
        mpd_uint_t *usrc = mpd_alloc((mpd_size_t)srclen, sizeof *usrc);
8382
        if (usrc == NULL) {
8383
            mpd_seterror(result, MPD_Malloc_error, status);
8384
            return;
8385
        }
8386
        for (n = 0; n < srclen; n++) {
8387
            usrc[n] = srcdata[n];
8388
        }
8389
8390
        n = _coeff_from_larger_base(result, rlen, MPD_RADIX,
8391
                                    usrc, (mpd_ssize_t)srclen, srcbase,
8392
                                    status);
8393
        mpd_free(usrc);
8394
    }
8395
#endif
8396
8397
    if (n == SIZE_MAX) {
8398
        return;
8399
    }
8400
8401
    mpd_set_flags(result, srcsign);
inline
    
mpd_set_flags should always be inlined (cost=always) 
mpd_qimport_u32
inline
    
mpd_set_flags inlined into mpd_qimport_u32 
mpd_qimport_u32
8402
    result->exp = 0;
8403
    result->len = n;
8404
    mpd_setdigits(result);
inline
    
mpd_setdigits can be inlined into mpd_qimport_u32 with cost=295 (threshold=325) 
mpd_qimport_u32
inline
    
mpd_setdigits inlined into mpd_qimport_u32 
mpd_qimport_u32
8405
8406
    mpd_qresize(result, result->len, status);
inline
    
mpd_qresize should always be inlined (cost=always) 
mpd_qimport_u32
inline
    
mpd_qresize inlined into mpd_qimport_u32 
mpd_qimport_u32
gvn
                                
load of type i64 eliminated in favor of phi 
mpd_qimport_u32
8407
    mpd_qfinalize(result, ctx, status);
inline
    
mpd_qfinalize too costly to inline (cost=635, threshold=625) 
mpd_qimport_u32
inline
    
mpd_qfinalize will not be inlined into mpd_qimport_u32 
mpd_qimport_u32
8408
}
8409
8410
8411
8412
8413